Warning: There is no cluster found.
[ROHALOG:16908786] Automatic Release of Resource: Start
cl_get_path[249]: '.' is in the current path. This use is accepted, even though the directory cannot be checked at run time.
cl_get_path[249]: '.' is in the current path. This use is accepted, even though the directory cannot be checked at run time.
cl_get_path[249]: '.' is in the current path. This use is accepted, even though the directory cannot be checked at run time.
:get_local_nodename[48] version=1.2.1.28
:get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster
:get_local_nodename[54] ODMDIR=/etc/es/objrepos
:get_local_nodename[54] export ODMDIR
:get_local_nodename[55] nodename=''
:get_local_nodename[55] typeset nodename
:get_local_nodename[56] cllsclstr -N
Warning: There is no cluster found.
:get_local_nodename[56] nodename=''
:get_local_nodename[57] rc=255
:get_local_nodename[57] typeset -i rc
:get_local_nodename[58] (( 255 != 0 ))
:get_local_nodename[58] exit 255
cl_get_path[249]: '.' is in the current path. This use is accepted, even though the directory cannot be checked at run time.
:clmanageroha[318] : version='@(#)' 5881272 43haes/usr/sbin/cluster/events/clmanageroha.sh, 61aha_r726, 2205A_aha726, May 16 2022 12:15 PM
:clmanageroha[321] clodmget -n -f connection_type HACMPhmcparam
:clmanageroha[321] CONN_TYPE=0
:clmanageroha[321] typeset -i CONN_TYPE
:clmanageroha[323] clodmget -q name=' and object like POWERVS_*' -nf name HACMPnode
:clmanageroha[323] 2> /dev/null
:clmanageroha[323] [[ -n '' ]]
:clmanageroha[326] export CONN_TYPE
:clmanageroha[331] roha_session_open -o release -s -t
:clmanageroha[roha_session_open:131] roha_session.id=18547048
:clmanageroha[roha_session_open:132] date
:clmanageroha[roha_session_open:132] LC_ALL=C
:clmanageroha[roha_session_open:132] roha_session_log 'Open session 18547048 at Sat Jan 28 16:40:08 KORST 2023'
[ROHALOG:18547048:(0.094)] Open session 18547048 at Sat Jan 28 16:40:08 KORST 2023
:clmanageroha[roha_session_open:137] getopts :cso:l:t opt
:clmanageroha[roha_session_open:146] roha_session.operation=release
:clmanageroha[roha_session_open:137] getopts :cso:l:t opt
:clmanageroha[roha_session_open:143] roha_session.systemmirror_mode=1
:clmanageroha[roha_session_open:137] getopts :cso:l:t opt
:clmanageroha[roha_session_open:152] online_rgs_skip=1
:clmanageroha[roha_session_open:137] getopts :cso:l:t opt
:clmanageroha[roha_session_open:163] [[ release != @(acquire|release|adjust) ]]
:clmanageroha[roha_session_open:168] no_roha_apps=0
:clmanageroha[roha_session_open:168] typeset -i no_roha_apps
:clmanageroha[roha_session_open:169] need_explicit_res_rel=0
:clmanageroha[roha_session_open:169] typeset -i need_explicit_res_rel
:clmanageroha[roha_session_open:187] [[ -n '' ]]
:clmanageroha[roha_session_open:188] [[ -z '' ]]
:clmanageroha[roha_session_open:188] clmgr q roha
ERROR: no cluster is defined.
:clmanageroha[roha_session_open:188] [[ -z '' ]]
:clmanageroha[roha_session_open:189] roha_session_log 'INFO: No ROHA configured on applications.\n'
[ROHALOG:18547048:(0.578)] INFO: No ROHA configured on applications.
[ROHALOG:18547048:(0.578)]
:clmanageroha[roha_session_open:190] no_roha_apps=1
:clmanageroha[roha_session_open:195] read_tunables
:clmanageroha[roha_session_open:196] echo ''
:clmanageroha[roha_session_open:196] grep -q
Usage: grep [-r] [-R] [-H] [-L] [-E|-F] [-c|-l|-q] [-insvxbhwyu] [-p[parasep]] -e pattern_list... [-f pattern_file...] [file...]
Usage: grep [-r] [-R] [-H] [-L] [-E|-F] [-c|-l|-q] [-insvxbhwyu] [-p[parasep]] [-e pattern_list...] -f pattern_file... [file...]
Usage: grep [-r] [-R] [-H] [-L] [-E|-F] [-c|-l|-q] [-insvxbhwyu] [-p[parasep]] pattern_list [file...]
:clmanageroha[roha_session_open:197] (( 2 == 0 ))
:clmanageroha[roha_session_open:202] (( 1 == 1 ))
:clmanageroha[roha_session_open:203] roha_session_read_odm_dynresop DLPAR_MEM
:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_MEM -nf value HACMPdynresop
:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos
:clmanageroha[roha_session_read_odm_dynresop:816] out=''
:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0
:clmanageroha[roha_session_open:203] (( 0 == 0.00 ))
:clmanageroha[roha_session_open:204] roha_session_read_odm_dynresop DLPAR_PROCS
:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROCS -nf value HACMPdynresop
:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos
:clmanageroha[roha_session_read_odm_dynresop:816] out=''
:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0
:clmanageroha[roha_session_open:204] (( 0 == 0 ))
:clmanageroha[roha_session_open:205] roha_session_read_odm_dynresop DLPAR_PROC_UNITS
:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROC_UNITS -nf value HACMPdynresop
:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos
:clmanageroha[roha_session_read_odm_dynresop:816] out=''
:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0
:clmanageroha[roha_session_open:205] (( 0 == 0.00 ))
:clmanageroha[roha_session_open:206] roha_session_log 'INFO: Nothing to be done.\n'
[ROHALOG:18547048:(0.635)] INFO: Nothing to be done.
[ROHALOG:18547048:(0.635)]
:clmanageroha[roha_session_open:207] exit 0
[ROHALOG:16908786] Automatic Release of Resource: End
rc.init: Removed /usr/es/sbin/cluster/.cthags.exit file.
Warning: There is no cluster found.

Jan 28 2023 17:10:20 EVENT START: admin_op clrm_start_request 28696 0

|2023-01-28T17:10:20|28696|EVENT START: admin_op clrm_start_request 28696 0|
:admin_op[110] trap sigint_handler INT
:admin_op[116] OP_TYPE=clrm_start_request
:admin_op[116] typeset OP_TYPE
:admin_op[117] SERIAL=28696
:admin_op[117] typeset -li SERIAL
:admin_op[118] INVALID=0
:admin_op[118] typeset -li INVALID
The administrator initiated the following action at Sat Jan 28 17:10:20 KORST 2023
Check smit.log and clutils.log for additional details.
Starting PowerHA cluster services on node: epprda in normal mode...

Jan 28 2023 17:10:23 EVENT COMPLETED: admin_op clrm_start_request 28696 0 0

|2023-01-28T17:10:23|28696|EVENT COMPLETED: admin_op clrm_start_request 28696 0 0|

PowerHA SystemMirror Event Preamble
----------------------------------------------------------------------------
Serial number for this event: 28697

Cluster services started on node 'epprda'
Enqueued rg_move acquire event for resource group epprd_rg.
Node Up Completion Event has been enqueued.
----------------------------------------------------------------------------
|EVENT_PREAMBLE_START|TE_JOIN_NODE_DEP|2023-01-28T17:10:25|28697|
|CLUSTER_RG_MOVE_ACQUIRE|epprd_rg|
|NODE_UP_COMPLETE|
|EVENT_PREAMBLE_END|

Jan 28 2023 17:10:27 EVENT START: node_up epprda

|2023-01-28T17:10:27|28697|EVENT START: node_up epprda|
+ clcycle clavailability.log
+ 1> /dev/null 2>& 1
+ cltime
+ DATE=2023-01-28T17:10:27.662239
+ echo '|2023-01-28T17:10:27.662239|INFO: node_up|epprda'
+ 1>> /var/hacmp/availability/clavailability.log
:node_up[182] version=%I%
:node_up[185] NODENAME=epprda
:node_up[185] export NODENAME
:node_up[193] STATUS=0
:node_up[193] typeset -li STATUS
:node_up[194] RC=0
:node_up[194] typeset -li RC
:node_up[195] ENABLE_NFS_CROSS_MOUNT=false
:node_up[196] START_MODE=''
:node_up[196] typeset START_MODE
:node_up[198] set -u
:node_up[200] (( 1 < 1 ))
:node_up[200] (( 1 > 2 ))
:node_up[207] : serial number for this event is 28697
:node_up[210] [[ epprda == epprda ]]
:node_up[213] : Remove the node halt lock file.
:node_up[214] : Hereafter, clstrmgr failure leads to node halt
:node_up[216] rm -f /usr/es/sbin/cluster/etc/ha_nodehalt.lock
:node_up[219] (( 1 > 1 ))
:node_up[256] : If RG_DEPENDENCIES=false, process RGs with clsetenvgrp
:node_up[258] [[ TRUE == FALSE ]]
:node_up[281] : localnode processing prior to RG acquisition
:node_up[283] [[ epprda == epprda ]]
:node_up[283] [[ '' != forced ]]
:node_up[286] : Reserve Volume Groups using SCSIPR
:node_up[288] clodmget -n -q policy=scsi -f value HACMPsplitmerge
:node_up[288] SCSIPR_ENABLED=''
:node_up[288] typeset SCSIPR_ENABLED
:node_up[289] [[ '' == Yes ]]
:node_up[334] : Setup VG fencing. This must be done prior to any potential disk access.
:node_up[336] node_up_vg_fence_init
:node_up[node_up_vg_fence_init:73] typeset VGs_on_line
:node_up[node_up_vg_fence_init:74] typeset VG_name
:node_up[node_up_vg_fence_init:75] typeset VG_ID
:node_up[node_up_vg_fence_init:76] typeset VG_PV_list
:node_up[node_up_vg_fence_init:79] : Find out what volume groups are currently on-line
:node_up[node_up_vg_fence_init:81] lsvg -L -o
:node_up[node_up_vg_fence_init:81] 2> /var/hacmp/log/node_up.lsvg.err
:node_up[node_up_vg_fence_init:81] print caavg_private rootvg
:node_up[node_up_vg_fence_init:81] VGs_on_line='caavg_private rootvg'
:node_up[node_up_vg_fence_init:82] [[ -e /var/hacmp/log/node_up.lsvg.err ]]
:node_up[node_up_vg_fence_init:82] [[ ! -s /var/hacmp/log/node_up.lsvg.err ]]
:node_up[node_up_vg_fence_init:82] rm /var/hacmp/log/node_up.lsvg.err
:node_up[node_up_vg_fence_init:85] : Clean up any old fence group files and stale fence groups.
:node_up[node_up_vg_fence_init:86] : These are all of the form '/usr/es/sbin/cluster/etc/vg/.uud'
:node_up[node_up_vg_fence_init:88] valid_vg_lst=''
:node_up[node_up_vg_fence_init:89] lsvg -L
:node_up[node_up_vg_fence_init:89] egrep -vw 'rootvg|caavg_private'
:node_up[node_up_vg_fence_init:89] 2>> /var/hacmp/log/node_up.lsvg.err
:node_up:datavg[node_up_vg_fence_init:91] PS4_LOOP=datavg
:node_up:datavg[node_up_vg_fence_init:92] clodmget -q $'name like \'*VOLUME_GROUP\' and value = datavg' -f value -n HACMPresource
:node_up:datavg[node_up_vg_fence_init:92] [[ -z datavg ]]
:node_up:datavg[node_up_vg_fence_init:109] : Volume group datavg is an HACMP resource
:node_up:datavg[node_up_vg_fence_init:111] [[ 'caavg_private rootvg' == ?(*\ )datavg?(\ *) ]]
:node_up:datavg[node_up_vg_fence_init:115] fence_height=ro
:node_up:datavg[node_up_vg_fence_init:119] : Recreate the fence group to match current volume group membership
:node_up:datavg[node_up_vg_fence_init:121] cl_vg_fence_redo -c datavg ro
:cl_vg_fence_redo[52] version=1.3
:cl_vg_fence_redo[55] RC=0
:cl_vg_fence_redo[55] typeset -li RC
:cl_vg_fence_redo[58] : Check for optional -c parameter
:cl_vg_fence_redo[60] [[ -c == -c ]]
:cl_vg_fence_redo[62] c_flag=-c
:cl_vg_fence_redo[63] shift
:cl_vg_fence_redo[66] VG=datavg
:cl_vg_fence_redo[67] UUID_file=/usr/es/sbin/cluster/etc/vg/datavg.uuid
:cl_vg_fence_redo[68] fence_height=ro
:cl_vg_fence_redo[70] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.uuid ]]
:cl_vg_fence_redo[83] [[ -z ro ]]
:cl_vg_fence_redo[98] : Rebuild the fence group for datavg
:cl_vg_fence_redo[99] : First, find the disks in the volume group
:cl_vg_fence_redo[101] /usr/sbin/getlvodm -v datavg
:cl_vg_fence_redo[101] VGID=00c44af100004b00000001851e9dc053
:cl_vg_fence_redo[103] [[ -n 00c44af100004b00000001851e9dc053 ]]
:cl_vg_fence_redo[106] : Create a fence group for datavg
:cl_vg_fence_redo[108] /usr/sbin/getlvodm -w 00c44af100004b00000001851e9dc053
:cl_vg_fence_redo[108] cut -f2 '-d '
:cl_vg_fence_redo[108] PV_disk_list=$'hdisk2\nhdisk3\nhdisk4\nhdisk5\nhdisk6\nhdisk7\nhdisk8'
:cl_vg_fence_redo[109] cl_vg_fence_init -c datavg ro hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8
cl_vg_fence_init[145]: version @(#) 7d4c34b 43haes/usr/sbin/cluster/events/utils/cl_vg_fence_init.c, 726, 2147A_aha726, Feb 05 2021 09:50 PM
cl_vg_fence_init[204]: odm_initialize()
cl_vg_fence_init[231]: calloc(7, 64)
cl_vg_fence_init[259]: getattr(hdisk2, PCM) = PCM/friend/fcpother
cl_vg_fence_init[259]: getattr(hdisk3, PCM) = PCM/friend/fcpother
cl_vg_fence_init[259]: getattr(hdisk4, PCM) = PCM/friend/fcpother
cl_vg_fence_init[259]: getattr(hdisk5, PCM) = PCM/friend/fcpother
cl_vg_fence_init[259]: getattr(hdisk6, PCM) = PCM/friend/fcpother
cl_vg_fence_init[259]: getattr(hdisk7, PCM) = PCM/friend/fcpother
cl_vg_fence_init[259]: getattr(hdisk8, PCM) = PCM/friend/fcpother
cl_vg_fence_init[294]: sfwAddFenceGroup(datavg, 7, hdisk2, hdisk3, hdisk4, hdisk5, hdisk6, hdisk7, hdisk8)
cl_vg_fence_init[374]: free(200101b8)
cl_vg_fence_init[400]: creat(/usr/es/sbin/cluster/etc/vg/datavg.uuid)
cl_vg_fence_init[408]: write(/usr/es/sbin/cluster/etc/vg/datavg.uuid, 16)
cl_vg_fence_init[442]: sfwSetFenceGroup(vg=datavg, height=ro(2) uuid=ec2db4422261eae02091227fb9e53c88)
:cl_vg_fence_redo[110] RC=0
:cl_vg_fence_redo[111] : Exit status is 0 from cl_vg_fence_init datavg ro hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8
:cl_vg_fence_redo[113] (( 0 != 0 ))
:cl_vg_fence_redo[123] return 0
:node_up:datavg[node_up_vg_fence_init:122] valid_vg_lst=' datavg'
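
For reference, the fence-group rebuild that cl_vg_fence_redo just performed reduces to roughly the ksh sketch below. It is assembled only from the commands visible in the trace (getlvodm, cut, cl_vg_fence_init); the VG name and fence height match the traced values and are parameters in the real script:

# Sketch: rebuild the storage framework fence group for one volume group.
VG=datavg
fence_height=ro

VGID=$(/usr/sbin/getlvodm -v $VG)                            # VG name -> VGID
PV_disk_list=$(/usr/sbin/getlvodm -w $VGID | cut -f2 '-d ')  # PVID/hdisk pairs -> hdisk names

# cl_vg_fence_init creates the fence group over those disks and, per the
# trace, writes the 16-byte group UUID to /usr/es/sbin/cluster/etc/vg/$VG.uuid
# so later fence-height changes can find it.
cl_vg_fence_init -c $VG $fence_height $PV_disk_list
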
:node_up:datavg[node_up_vg_fence_init:125] [[ -e /var/hacmp/log/node_up.lsvg.err ]]
:node_up:datavg[node_up_vg_fence_init:125] [[ ! -s /var/hacmp/log/node_up.lsvg.err ]]
:node_up:datavg[node_up_vg_fence_init:125] rm /var/hacmp/log/node_up.lsvg.err
:node_up:datavg[node_up_vg_fence_init:128] : Any remaining old fence group files are from stale fence groups,
:node_up:datavg[node_up_vg_fence_init:129] : so remove them
:node_up:datavg[node_up_vg_fence_init:131] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.uuid ]]
:node_up:datavg[node_up_vg_fence_init:133] ls /usr/es/sbin/cluster/etc/vg/datavg.uuid
:node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:135] PS4_LOOP=/usr/es/sbin/cluster/etc/vg/datavg.uuid
:node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:136] VG_name=datavg.uuid
:node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:137] VG_name=datavg
:node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:138] [[ ' datavg' == ?(*\ )datavg?(\ *) ]]
:node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:141] : Just redid the fence group for datavg
:node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:143] continue
:node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:158] unset PS4_LOOP
:node_up[node_up_vg_fence_init:160] return 0
:node_up[344] : If WLM manager classes have been configured for an application server, process them now
:node_up[346] clodmget -q $'name like \'WLM_*\'' -f id HACMPresource
:node_up[346] [[ -n '' ]]
:node_up[371] : Call ss-load replicated resource methods if they are defined
:node_up[373] cl_rrmethods2call ss_load
:cl_rrmethods2call[56] version=%I%
:cl_rrmethods2call[84] RRMETHODS=''
:cl_rrmethods2call[85] NEED_RR_ENV_VARS=no
:cl_rrmethods2call[104] : The load and unload methods if defined are returned on the
:cl_rrmethods2call[105] : local node
:cl_rrmethods2call[107] [[ epprda == epprda ]]
:cl_rrmethods2call[109] NEED_RR_ENV_VARS=yes
:cl_rrmethods2call[129] : Set the '*_REP_RESOURCE' variables if needed.
:cl_rrmethods2call[131] [[ yes == yes ]]
:cl_rrmethods2call[133] cllsres
:cl_rrmethods2call[133] 2> /dev/null
:cl_rrmethods2call[133] eval APPLICATIONS='"epprd_app"' EXPORT_FILESYSTEM='"/board_org"' FILESYSTEM='""' FORCED_VARYON='"false"' FSCHECK_TOOL='"fsck"' FS_BEFORE_IPADDR='"false"' MOUNT_FILESYSTEM='"/board;/board_org"' RECOVERY_METHOD='"sequential"' SERVICE_LABEL='"epprd"' SSA_DISK_FENCING='"false"' VG_AUTO_IMPORT='"false"' VOLUME_GROUP='"datavg"' USERDEFINED_RESOURCES='""'
:cl_rrmethods2call[1] APPLICATIONS=epprd_app
:cl_rrmethods2call[1] EXPORT_FILESYSTEM=/board_org
:cl_rrmethods2call[1] FILESYSTEM=''
:cl_rrmethods2call[1] FORCED_VARYON=false
:cl_rrmethods2call[1] FSCHECK_TOOL=fsck
:cl_rrmethods2call[1] FS_BEFORE_IPADDR=false
:cl_rrmethods2call[1] MOUNT_FILESYSTEM='/board;/board_org'
:cl_rrmethods2call[1] RECOVERY_METHOD=sequential
:cl_rrmethods2call[1] SERVICE_LABEL=epprd
:cl_rrmethods2call[1] SSA_DISK_FENCING=false
:cl_rrmethods2call[1] VG_AUTO_IMPORT=false
:cl_rrmethods2call[1] VOLUME_GROUP=datavg
:cl_rrmethods2call[1] USERDEFINED_RESOURCES=''
:cl_rrmethods2call[137] [[ -n '' ]]
:cl_rrmethods2call[142] [[ -n '' ]]
:cl_rrmethods2call[147] [[ -n '' ]]
:cl_rrmethods2call[152] [[ -n '' ]]
:cl_rrmethods2call[157] [[ -n '' ]]
:cl_rrmethods2call[162] [[ -n '' ]]
:cl_rrmethods2call[167] [[ -n '' ]]
:cl_rrmethods2call[172] [[ -n '' ]]
:cl_rrmethods2call[182] [[ -z '' ]]
:cl_rrmethods2call[184] typeset sysmgdata
:cl_rrmethods2call[185] typeset reposmgdata
:cl_rrmethods2call[186] [[ -x /usr/es/sbin/cluster/xd_generic/xd_cli/clxd_list_mg_smit ]]
:cl_rrmethods2call[191] [[ -n '' ]]
:cl_rrmethods2call[191] [[ -n '' ]]
:cl_rrmethods2call[197] echo ''
:cl_rrmethods2call[199] return 0
:node_up[373] METHODS=''
:node_up[387] : When the local node is brought up, reset the resource locator info.
:node_up[390] clchdaemons -r -d clstrmgr_scripts -t resource_locator
:node_up[397] [[ '' != manual ]]
:node_up[400] : attempt passive varyon for any ECM VGs in serial RGs
:node_up[405] cl_pvo
:cl_pvo[590] version=1.34.2.12
:cl_pvo(0.007)[592] PS4_TIMER=true
:cl_pvo(0.007)[594] rc=0
:cl_pvo(0.007)[594] typeset -li rc
:cl_pvo(0.007)[595] mode=0
:cl_pvo(0.007)[595] typeset -li mode
:cl_pvo(0.007)[600] ENODEV=19
:cl_pvo(0.007)[600] typeset -li ENODEV
:cl_pvo(0.007)[601] vg_force_on_flag=''
:cl_pvo(0.007)[605] : Pick up any passed options
:cl_pvo(0.007)[607] rg_list=''
:cl_pvo(0.007)[607] export rg_list
:cl_pvo(0.007)[608] vg_list=''
:cl_pvo(0.007)[609] fs_list=''
:cl_pvo(0.008)[610] all_vgs_flag=''
:cl_pvo(0.008)[611] [[ -z '' ]]
:cl_pvo(0.008)[613] all_vgs_flag=true
:cl_pvo(0.008)[615] getopts :g:v:f: option
:cl_pvo(0.008)[629] shift 0
:cl_pvo(0.008)[630] [[ -n '' ]]
:cl_pvo(0.008)[645] O_flag=''
:cl_pvo(0.008)[646] odmget -q 'attribute = varyon_state' PdAt
:cl_pvo(0.010)[646] [[ -n $'\nPdAt:\n\tuniquetype = "logical_volume/vgsubclass/vgtype"\n\tattribute = "varyon_state"\n\tdeflt = "0"\n\tvalues = "0,1,2,3"\n\twidth = ""\n\ttype = "R"\n\tgeneric = ""\n\trep = "l"\n\tnls_index = 0' ]]
:cl_pvo(0.010)[649] : LVM may record that a volume group was varied on from an earlier
:cl_pvo(0.010)[650] : IPL. Rely on HA state tracking, and override the LVM check
:cl_pvo(0.010)[652] O_flag=-O
:cl_pvo(0.010)[655] [[ -n true ]]
:cl_pvo(0.010)[657] [[ -z epprda ]]
:cl_pvo(0.010)[661] [[ -z epprda ]]
:cl_pvo(0.010)[672] : Since no resource names of any type were explicitly passed, go
:cl_pvo(0.010)[673] : find all the resource groups this node is a member of.
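
The records that follow implement that membership lookup with a single ODM query. As a standalone ksh sketch (node name hard-coded to epprda to match the trace; the real script uses a variable):

# Sketch: list every resource group whose node list contains this node.
# clodmget prints "group:nodes" records from the HACMPgroup ODM class;
# the egrep pattern matches the node name as a whole word in the node list.
NODENAME=epprda
rg_list=$(clodmget -f group,nodes HACMPgroup | egrep "[: ]${NODENAME}( |\$)" | cut -f1 -d:)
print -- $rg_list    # -> epprd_rg in this cluster
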
:cl_pvo(0.012)[675] clodmget -f group,nodes HACMPgroup
:cl_pvo(0.015)[675] egrep '[: ]epprda( |$)'
:cl_pvo(0.016)[675] cut -f1 -d:
:cl_pvo(0.019)[675] rg_list=epprd_rg
:cl_pvo(0.019)[676] [[ -z epprd_rg ]]
:cl_pvo(0.019)[686] [[ -z '' ]]
:cl_pvo(0.019)[686] [[ -n epprd_rg ]]
:cl_pvo(0.019)[689] : Since no volume groups were passed, go find all the volume groups
:cl_pvo(0.019)[690] : in the given/extracted list of resource groups.
:cl_pvo(0.019)[695] : For each resource group that this node participates in, get the
:cl_pvo(0.019)[696] : list of serial access volume groups in that resource group.
:cl_pvo(0.019)[698] clodmget -q 'group = epprd_rg and name = VOLUME_GROUP' -f value -n HACMPresource
:cl_pvo(0.022)[698] rg_vg_list=datavg
:cl_pvo(0.022)[700] [[ -n datavg ]]
:cl_pvo(0.022)[702] [[ -n true ]]
:cl_pvo(0.022)[703] odmget -q $'group = epprd_rg and name like \'*REP_RESOURCE\'' HACMPresource
:cl_pvo(0.024)[703] [[ -n '' ]]
:cl_pvo(0.024)[739] : If there were any serial access volume groups for this node and
:cl_pvo(0.024)[740] : that resource group, add them to the list.
:cl_pvo(0.024)[742] vg_list=datavg
:cl_pvo(0.024)[747] [[ -z '' ]]
:cl_pvo(0.024)[747] [[ -n epprd_rg ]]
:cl_pvo(0.024)[750] : Since no file systems were passed, go find all the file systems in
:cl_pvo(0.024)[751] : the given/extracted list of resource groups.
:cl_pvo(0.024)[755] : For each resource group that this node participates in, get the
:cl_pvo(0.024)[756] : list of file systems in that resource group.
:cl_pvo(0.024)[761] clodmget -q 'group = epprd_rg and name = FILESYSTEM' -f value -n HACMPresource
:cl_pvo(0.027)[761] rg_fs_list=ALL
:cl_pvo(0.027)[763] [[ -n ALL ]]
:cl_pvo(0.027)[765] [[ -n true ]]
:cl_pvo(0.027)[766] odmget -q $'group = epprd_rg and name like \'*REP_RESOURCE\'' HACMPresource
:cl_pvo(0.029)[766] [[ -n '' ]]
:cl_pvo(0.029)[780] : If there were any file systems for this node and that resource
:cl_pvo(0.029)[781] : group, add them to the list
:cl_pvo(0.029)[783] fs_list=ALL
:cl_pvo(0.029)[790] [[ ALL == ALL ]]
:cl_pvo(0.029)[792] continue
:cl_pvo(0.029)[801] : Remove any duplicates from the volume group list
:cl_pvo(0.031)[803] echo datavg
:cl_pvo(0.033)[803] tr ' ' '\n'
:cl_pvo(0.034)[803] sort -u
:cl_pvo(0.038)[803] vg_list=datavg
:cl_pvo(0.038)[805] [[ -z datavg ]]
:cl_pvo(0.038)[814] : Find out what volume groups are currently on-line
:cl_pvo(0.038)[816] lsvg -L -o
:cl_pvo(0.039)[816] 2> /tmp/lsvg.err
:cl_pvo(0.042)[816] print caavg_private rootvg
:cl_pvo(0.042)[816] ON_LIST='caavg_private rootvg'
:cl_pvo(0.042)[819] : If this node is the first node up in the cluster,
:cl_pvo(0.042)[820] : we want to do a sync for each of the volume groups
:cl_pvo(0.042)[821] : we bring on-line. If multiple cluster nodes are already active, the
:cl_pvo(0.042)[822] : sync is unnecessary, having been done once, and possibly disruptive.
:cl_pvo(0.042)[824] [[ -n '' ]]
:cl_pvo(0.042)[833] : No other cluster nodes are present, default to sync just to be sure
:cl_pvo(0.042)[834] : the volume group is in a good state
:cl_pvo(0.042)[836] syncflag=''
:cl_pvo(0.042)[840] : Now, process each volume group in the list of those this node accesses.
:cl_pvo(0.042):datavg[844] PS4_LOOP=datavg
:cl_pvo(0.042):datavg[844] typeset PS4_LOOP
:cl_pvo(0.042):datavg[846] : Skip any concurrent GMVGs, they should never be pvo.
:cl_pvo(0.043):datavg[848] odmget -q name='GMVG_REP_RESOURCE AND value=datavg' HACMPresource
:cl_pvo(0.046):datavg[848] [[ -n '' ]]
:cl_pvo(0.046):datavg[853] : The VGID is what the LVM low level commands used below use to
:cl_pvo(0.046):datavg[854] : identify the volume group.
:cl_pvo(0.046):datavg[856] /usr/sbin/getlvodm -v datavg
:cl_pvo(0.049):datavg[856] vgid=00c44af100004b00000001851e9dc053
:cl_pvo(0.049):datavg[860] mode=99
:cl_pvo(0.049):datavg[863] : Attempt to determine the mode of the volume group - is it an
:cl_pvo(0.049):datavg[864] : enhanced concurrent mode volume group or not.
:cl_pvo(0.049):datavg[868] export mode
:cl_pvo(0.049):datavg[869] hdisklist=''
:cl_pvo(0.050):datavg[870] /usr/sbin/getlvodm -w 00c44af100004b00000001851e9dc053
:cl_pvo(0.052):datavg[870] read pvid hdisk
:cl_pvo(0.052):datavg[871] hdisklist=hdisk2
:cl_pvo(0.052):datavg[870] read pvid hdisk
:cl_pvo(0.052):datavg[871] hdisklist='hdisk2 hdisk3'
:cl_pvo(0.052):datavg[870] read pvid hdisk
:cl_pvo(0.052):datavg[871] hdisklist='hdisk2 hdisk3 hdisk4'
:cl_pvo(0.052):datavg[870] read pvid hdisk
:cl_pvo(0.052):datavg[871] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5'
:cl_pvo(0.052):datavg[870] read pvid hdisk
:cl_pvo(0.052):datavg[871] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6'
:cl_pvo(0.052):datavg[870] read pvid hdisk
:cl_pvo(0.052):datavg[871] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7'
:cl_pvo(0.052):datavg[870] read pvid hdisk
:cl_pvo(0.052):datavg[871] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8'
:cl_pvo(0.052):datavg[870] read pvid hdisk
:cl_pvo(0.052):datavg[873] get_vg_mode 'hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8' 00c44af100004b00000001851e9dc053 datavg
:cl_pvo(0.052):datavg[get_vg_mode:289] typeset vgid vg_name syncflag hdisklist
:cl_pvo(0.052):datavg[get_vg_mode:290] typeset GROUP_NAME FORCED_VARYON
:cl_pvo(0.052):datavg[get_vg_mode:291] TUR_RC=0
:cl_pvo(0.052):datavg[get_vg_mode:291] typeset -li TUR_RC
:cl_pvo(0.052):datavg[get_vg_mode:292] vg_disks=0
:cl_pvo(0.052):datavg[get_vg_mode:292] typeset -li vg_disks
:cl_pvo(0.052):datavg[get_vg_mode:293] max_disk_test=0
:cl_pvo(0.052):datavg[get_vg_mode:293] typeset -li max_disk_test
:cl_pvo(0.052):datavg[get_vg_mode:294] disk_tested=0
:cl_pvo(0.052):datavg[get_vg_mode:294] typeset -li disk_tested
:cl_pvo(0.052):datavg[get_vg_mode:296] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8'
:cl_pvo(0.052):datavg[get_vg_mode:297] vgid=00c44af100004b00000001851e9dc053
:cl_pvo(0.052):datavg[get_vg_mode:298] vg_name=datavg
:cl_pvo(0.052):datavg[get_vg_mode:299] syncflag=''
:cl_pvo(0.052):datavg[get_vg_mode:301] odmget -q name='datavg and attribute=conc_capable and value=y' CuAt
:cl_pvo(0.053):datavg[get_vg_mode:301] ODMDIR=/etc/objrepos
:cl_pvo(0.055):datavg[get_vg_mode:301] [[ -n $'\nCuAt:\n\tname = "datavg"\n\tattribute = "conc_capable"\n\tvalue = "y"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "l"\n\tnls_index = 0' ]]
:cl_pvo(0.055):datavg[get_vg_mode:304] : If LVM thinks that this volume group is concurrent capable, that
:cl_pvo(0.055):datavg[get_vg_mode:305] : is good enough
:cl_pvo(0.055):datavg[get_vg_mode:307] mode=32
:cl_pvo(0.055):datavg[get_vg_mode:308] return
:cl_pvo(0.055):datavg[876] : See if the volume group is already on line. This should
:cl_pvo(0.055):datavg[877] : only happen if it were manually brought on line outside of HACMP
:cl_pvo(0.055):datavg[878] : control, or left on-line after a forced down.
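
Before that online check runs, note how get_vg_mode classified datavg above: a single ODM query against CuAt decides it. A minimal sketch of that test, with the mode value 32 taken from the trace's own convention (the surrounding variables are hypothetical stand-ins, not the verbatim script):

# Sketch: decide whether a VG is enhanced concurrent capable, as get_vg_mode
# did above. LVM records conc_capable=y in CuAt for ECM volume groups.
vg_name=datavg
if [[ -n $(ODMDIR=/etc/objrepos odmget -q "name = $vg_name and attribute = conc_capable and value = y" CuAt) ]]
then
    mode=32     # enhanced concurrent mode, per the trace
fi
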
:cl_pvo(0.055):datavg[880] vg_on_mode=''
:cl_pvo(0.055):datavg[880] typeset vg_on_mode
:cl_pvo(0.055):datavg[881] [[ 'caavg_private rootvg' == ?(*\ )datavg?(\ *) ]]
:cl_pvo(0.056):datavg[891] lsvg -L datavg
:cl_pvo(0.056):datavg[891] 2> /dev/null
:cl_pvo(0.058):datavg[891] grep -q -i -w passive-only
:cl_pvo(0.071):datavg[896] [[ -n '' ]]
:cl_pvo(0.071):datavg[976] : Volume group is currently not on line in any mode
:cl_pvo(0.071):datavg[978] (( 99 == 32 ))
:cl_pvo(0.071):datavg[1041] (( 32 != 32 && 99 != 32 ))
:cl_pvo(0.071):datavg[1060] (( 32 == 32 ))
:cl_pvo(0.071):datavg[1063] : If this is actually an enhanced concurrent mode volume group,
:cl_pvo(0.071):datavg[1064] : bring it on line in passive mode. Other kinds are just skipped.
:cl_pvo(0.071):datavg[1066] varyonp datavg 'hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8'
:cl_pvo(0.071):datavg[varyonp:417] NOQUORUM=20
:cl_pvo(0.071):datavg[varyonp:417] typeset -li NOQUORUM
:cl_pvo(0.071):datavg[varyonp:418] rc=0
:cl_pvo(0.071):datavg[varyonp:418] typeset -li rc
:cl_pvo(0.071):datavg[varyonp:421] : Pick up passed parameters: volume group and sync flag
:cl_pvo(0.071):datavg[varyonp:423] typeset syncflag hdisklist vg
:cl_pvo(0.071):datavg[varyonp:424] vg=datavg
:cl_pvo(0.071):datavg[varyonp:425] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8'
:cl_pvo(0.071):datavg[varyonp:426] syncflag=''
:cl_pvo(0.071):datavg[varyonp:429] : Make sure the volume group is not fenced. Varyon requires read write
:cl_pvo(0.071):datavg[varyonp:430] : access.
:cl_pvo(0.071):datavg[varyonp:432] cl_set_vg_fence_height -c datavg rw
cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37
cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY)
cl_set_vg_fence_height[214]: read(datavg, 16)
cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)
cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0))
:cl_pvo(0.074):datavg[varyonp:433] RC=0
:cl_pvo(0.074):datavg[varyonp:434] (( 19 == 0 ))
:cl_pvo(0.074):datavg[varyonp:442] : Return code from volume group fencing for datavg is 0
:cl_pvo(0.074):datavg[varyonp:443] (( 0 != 0 ))
:cl_pvo(0.074):datavg[varyonp:455] : Try to vary on the volume group in passive concurrent mode
:cl_pvo(0.074):datavg[varyonp:457] varyonvg -c -P -O datavg
:cl_pvo(0.667):datavg[varyonp:458] rc=0
:cl_pvo(0.668):datavg[varyonp:460] (( 0 != 0 ))
:cl_pvo(0.668):datavg[varyonp:483] : exit status of varyonvg -c -P -O datavg is: 0
:cl_pvo(0.668):datavg[varyonp:485] (( 0 == 20 ))
:cl_pvo(0.668):datavg[varyonp:505] : If varyon was ultimately unsuccessful, note the error
:cl_pvo(0.668):datavg[varyonp:507] (( 0 != 0 ))
:cl_pvo(0.668):datavg[varyonp:511] : If varyonvg was successful, try to recover
:cl_pvo(0.668):datavg[varyonp:512] : any missing or removed disks
:cl_pvo(0.668):datavg[varyonp:514] mr_recovery datavg
:cl_pvo(0.668):datavg[mr_recovery:59] vg=datavg
:cl_pvo(0.668):datavg[mr_recovery:59] typeset vg
:cl_pvo(0.668):datavg[mr_recovery:60] typeset mr_disks
:cl_pvo(0.668):datavg[mr_recovery:61] typeset disk_list
:cl_pvo(0.668):datavg[mr_recovery:62] typeset hdisk
:cl_pvo(0.670):datavg[mr_recovery:64] lsvg -p datavg
:cl_pvo(0.670):datavg[mr_recovery:64] 2> /dev/null
:cl_pvo(0.672):datavg[mr_recovery:64] grep -iw missing
:cl_pvo(0.691):datavg[mr_recovery:64] missing_disks=''
:cl_pvo(0.691):datavg[mr_recovery:66] [[ -n '' ]]
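
The varyonp sequence above follows a fixed pattern: lift the fence to read-write (varyon needs write access to the VGDA), vary the group on in passive concurrent mode, then, after the disk-recovery checks that continue below, drop the fence back to read-only. Condensed into a sketch (commands and flags exactly as traced; error handling simplified):

# Sketch of varyonp's core, per the trace above and below.
vg=datavg

cl_set_vg_fence_height -c $vg rw      # fence must allow read-write before varyon
varyonvg -c -P -O $vg                 # -c concurrent, -P passive mode, -O override
rc=$?                                 #    LVM's recorded varyon state from an earlier IPL
(( rc != 0 )) && print -u2 "passive varyon of $vg failed, rc=$rc"

cl_set_vg_fence_height -c $vg ro      # back to read-only while the VG stays passive
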
:cl_pvo(0.692):datavg[mr_recovery:89] lsvg -p datavg
:cl_pvo(0.692):datavg[mr_recovery:89] 2> /dev/null
:cl_pvo(0.695):datavg[mr_recovery:89] grep -iw removed
:cl_pvo(0.713):datavg[mr_recovery:89] removed_disks=''
:cl_pvo(0.713):datavg[mr_recovery:91] [[ -n '' ]]
:cl_pvo(0.713):datavg[varyonp:518] : Restore the fence height to read only, for passive varyon
:cl_pvo(0.713):datavg[varyonp:520] cl_set_vg_fence_height -c datavg ro
cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37
cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY)
cl_set_vg_fence_height[214]: read(datavg, 16)
cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)
cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=ro(2))
:cl_pvo(0.716):datavg[varyonp:521] RC=0
:cl_pvo(0.716):datavg[varyonp:522] : Return code from volume group fencing for datavg is 0
:cl_pvo(0.716):datavg[varyonp:523] (( 0 != 0 ))
:cl_pvo(0.716):datavg[varyonp:533] return 0
:cl_pvo(0.716):datavg[1073] return 0
:node_up[406] : exit status of cl_pvo is: 0
:node_up[422] ls '/dev/vpath*'
:node_up[422] 1> /dev/null 2>& 1
:node_up[432] : Configure any split and merge policies.
:node_up[434] rm -f /usr/es/sbin/cluster/etc/smm_oflag
:node_up[435] [[ -z '' ]]
:node_up[438] : If this is the first node up, configure split merge handling.
:node_up[440] cl_cfg_sm_rt
:cl_cfg_sm_rt[738] version=1.34
:cl_cfg_sm_rt[741] clctrl_rc=0
:cl_cfg_sm_rt[741] typeset -li clctrl_rc
:cl_cfg_sm_rt[742] src_rc=0
:cl_cfg_sm_rt[742] typeset -li src_rc
:cl_cfg_sm_rt[743] cl_migcheck_rc=0
:cl_cfg_sm_rt[743] typeset -li cl_migcheck_rc
:cl_cfg_sm_rt[744] bad_policy=''
:cl_cfg_sm_rt[745] SMP=''
:cl_cfg_sm_rt[748] : If we are in migration - if all nodes are not up to this level - do not
:cl_cfg_sm_rt[749] : attempt any configuration.
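
A condensed view of that migration guard, assuming (as the trace suggests) that clmixver prints a single version number and that the script simply returns without configuring when the level is too low; the action on the true branch is not exercised in this trace and is an assumption here:

# Sketch: skip split/merge setup while a mixed-version migration is in progress.
# 14 is the minimum level cl_cfg_sm_rt accepts, per the traced comparison.
version=$(clmixver)
if (( version < 14 ))
then
    return 0    # assumed: leave CAA/RSCT policies alone during migration
fi
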
:cl_cfg_sm_rt[751] clmixver
:cl_cfg_sm_rt[751] version=22
:cl_cfg_sm_rt[752] (( 22 < 14 ))
:cl_cfg_sm_rt[761] : Retrieve configured policies
:cl_cfg_sm_rt[763] clodmget -q 'policy = action' -f value -n HACMPsplitmerge
:cl_cfg_sm_rt[763] Action=Reboot
:cl_cfg_sm_rt[764] clodmget -q 'policy = split' -f value -n HACMPsplitmerge
:cl_cfg_sm_rt[764] Split=None
:cl_cfg_sm_rt[765] clodmget -q 'policy = merge' -f value -n HACMPsplitmerge
:cl_cfg_sm_rt[765] Merge=Majority
:cl_cfg_sm_rt[766] clodmget -q 'policy = tiebreaker' -f value -n HACMPsplitmerge
:cl_cfg_sm_rt[766] TieBreaker=''
:cl_cfg_sm_rt[767] clodmget -q 'policy = nfs_quorumserver' -f value -n HACMPsplitmerge
:cl_cfg_sm_rt[767] nfs_quorumserver=''
:cl_cfg_sm_rt[768] clodmget -q 'policy = local_quorumdirectory' -f value -n HACMPsplitmerge
:cl_cfg_sm_rt[768] local_quorumdirectory=''
:cl_cfg_sm_rt[769] clodmget -q 'policy = remote_quorumdirectory' -f value -n HACMPsplitmerge
:cl_cfg_sm_rt[769] remote_quorumdirectory=''
:cl_cfg_sm_rt[770] clodmget -q 'policy = anhp' -f value -n HACMPsplitmerge
:cl_cfg_sm_rt[770] is_anhp=''
:cl_cfg_sm_rt[771] clodmget -q 'policy = scsi' -f value -n HACMPsplitmerge
:cl_cfg_sm_rt[771] is_scsi=''
:cl_cfg_sm_rt[772] clodmget -q name=clutils.log -f value -n HACMPlogs
:cl_cfg_sm_rt[772] CLUTILS_LOG=/var/hacmp/log/clutils.log
:cl_cfg_sm_rt[775] : If policies are unset, apply the default policies
:cl_cfg_sm_rt[777] Split=None
:cl_cfg_sm_rt[778] Merge=Majority
:cl_cfg_sm_rt[779] Action=Reboot
:cl_cfg_sm_rt[782] : If tiebreaker was a configured policy, be sure that one was defined
:cl_cfg_sm_rt[784] [[ -z '' ]]
:cl_cfg_sm_rt[786] [[ None == TieBreaker ]]
:cl_cfg_sm_rt[790] [[ Majority == TieBreaker ]]
:cl_cfg_sm_rt[795] [[ -n '' ]]
:cl_cfg_sm_rt[807] : Set up the interlock file for use by smcaactrl. This tells
:cl_cfg_sm_rt[808] : smcaactrl to allow the following CAA operations.
:cl_cfg_sm_rt[810] date
:cl_cfg_sm_rt[810] 1> /usr/es/sbin/cluster/etc/cl_cfg_sm_rt.26149292
:cl_cfg_sm_rt[811] trap 'on_exit $?' EXIT
:cl_cfg_sm_rt[814] : Setting up CAA tunable local_merge_policy
:cl_cfg_sm_rt[816] typeset -i caa_level
:cl_cfg_sm_rt[817] lslpp -l bos.cluster.rte
:cl_cfg_sm_rt[817] grep bos.cluster.rte
:cl_cfg_sm_rt[817] uniq
:cl_cfg_sm_rt[817] awk -F ' ' '{print $2}'
:cl_cfg_sm_rt[817] tr -d .
:cl_cfg_sm_rt[817] caa_level=725102
:cl_cfg_sm_rt[818] (( 725102 >= 7140 ))
:cl_cfg_sm_rt[819] configure_local_merge_policy
:cl_cfg_sm_rt[configure_local_merge_policy:665] typeset -i clctrl_rc
:cl_cfg_sm_rt[configure_local_merge_policy:666] [[ -z '' ]]
:cl_cfg_sm_rt[configure_local_merge_policy:666] [[ -z '' ]]
:cl_cfg_sm_rt[configure_local_merge_policy:667] capability=0
:cl_cfg_sm_rt[configure_local_merge_policy:667] typeset -i capability
:cl_cfg_sm_rt[configure_local_merge_policy:669] cl_get_capabilities -i 6
:cl_cfg_sm_rt[configure_local_merge_policy:669] 2>& 1
:cl_cfg_sm_rt[configure_local_merge_policy:669] caa_sm_capability=$':cl_cfg_sm_rt[configure_local_merge_policy:669] LC_ALL=C\ncl_get_capabilities[178]: version 1.9\ncapability is 6\n\tid: 6 version: 1 flag: 1 '
:cl_cfg_sm_rt[configure_local_merge_policy:670] [[ -n $':cl_cfg_sm_rt[configure_local_merge_policy:669] LC_ALL=C\ncl_get_capabilities[178]: version 1.9\ncapability is 6\n\tid: 6 version: 1 flag: 1 ' ]]
:cl_cfg_sm_rt[configure_local_merge_policy:674] : If Sub Cluster Split Merge capability is defined
:cl_cfg_sm_rt[configure_local_merge_policy:675] : and globally available, then capability is set to 1
:cl_cfg_sm_rt[configure_local_merge_policy:677] capability='1 '
:cl_cfg_sm_rt[configure_local_merge_policy:680] (( 1 == 1 ))
:cl_cfg_sm_rt[configure_local_merge_policy:682] : Sub Cluster Split-Merge capability is available cluster wide
:cl_cfg_sm_rt[configure_local_merge_policy:684] [[ Majority != None ]]
:cl_cfg_sm_rt[configure_local_merge_policy:686] clctrl -tune -o local_merge_policy=h
1 tunable updated on cluster epprda_cluster.
:cl_cfg_sm_rt[configure_local_merge_policy:687] clctrl_rc=0
:cl_cfg_sm_rt[configure_local_merge_policy:688] (( 0 != 0 ))
:cl_cfg_sm_rt[configure_local_merge_policy:725] return 0
:cl_cfg_sm_rt[820] rc=0
:cl_cfg_sm_rt[820] typeset -i rc
:cl_cfg_sm_rt[821] (( 0 < 0 ))
:cl_cfg_sm_rt[827] : Configure CAA in accordance with the specified or defaulted policies
:cl_cfg_sm_rt[828] : for Merge
:cl_cfg_sm_rt[830] clctrl -tune -a
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).communication_mode = u
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).config_timeout = 240
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).deadman_mode = a
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).dr_enabled = 1
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).link_timeout = 30000
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).local_merge_policy = h
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).network_fdt = 20000
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).no_if_traffic_monitor = 0
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).node_down_delay = 10000
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).node_timeout = 30000
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).packet_ttl = 32
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).remote_hb_factor = 1
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).repos_mode = e
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).site_merge_policy = p
:cl_cfg_sm_rt[831] clctrl_rc=0
:cl_cfg_sm_rt[832] : Return code from 'clctrl -tune -a' is 0
:cl_cfg_sm_rt[835] : If the current deadman mode is not set to ASSERT,
:cl_cfg_sm_rt[836] : change it to that
:cl_cfg_sm_rt[842] clctrl -tune -x deadman_mode
:cl_cfg_sm_rt[842] cut -f2 -d:
:cl_cfg_sm_rt[842] current_deadman_mode=a
:cl_cfg_sm_rt[843] [[ a != a ]]
:cl_cfg_sm_rt[849] : Determine the current site merge policy, to see if it needs
:cl_cfg_sm_rt[850] : to be changed
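
The read-compare-set pattern used for CAA tunables appears next in the trace; extracted as a standalone sketch (tunable name and target value taken from the trace; the `cut` relies on the `cluster(uuid).tunable:value` output format shown above):

# Sketch: change a CAA tunable only when it differs from the desired value.
desired=h                                                  # heuristic merge policy
current=$(clctrl -tune -x site_merge_policy | cut -f2 -d:)
if [[ $current != $desired ]]
then
    clctrl -tune -o site_merge_policy=$desired             # prints "1 tunable updated ..."
fi
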
:cl_cfg_sm_rt[852] clctrl -tune -x site_merge_policy
:cl_cfg_sm_rt[852] cut -f2 -d:
:cl_cfg_sm_rt[852] current_merge_policy=p
:cl_cfg_sm_rt[854] [[ Majority == Manual ]]
:cl_cfg_sm_rt[865] [[ Majority == None ]]
:cl_cfg_sm_rt[878] : Everything else - tie breaker, majority, nfs - is heuristic merge policy
:cl_cfg_sm_rt[880] [[ p != h ]]
:cl_cfg_sm_rt[882] SMP=h
:cl_cfg_sm_rt[883] clctrl -tune -o site_merge_policy=h
1 tunable updated on cluster epprda_cluster.
:cl_cfg_sm_rt[886] clctrl_rc=0
:cl_cfg_sm_rt[887] (( 0 != 0 ))
:cl_cfg_sm_rt[901] [[ -n h ]]
:cl_cfg_sm_rt[904] : Make sure all instances of CAA across the cluster got the word
:cl_cfg_sm_rt[906] /usr/es/sbin/cluster/cspoc/cli_on_cluster -S clctrl -tune -x site_merge_policy
:cl_cfg_sm_rt[906] sort -u
:cl_cfg_sm_rt[906] cut -f3 -d:
clhaver[576]: version 1.14
clhaver[591]: colon delimied output
clhaver[612]: MINVER=5100
clhaver[624]: thread(epprda)
clhaver[144]: cl_gethostbynode epprda
cl_gethostbynode[102]: version 1.1 i_flag=0 given name is epprda
cl_gethostbynode[127]: cl_query nodes=2
cl_gethostbynode[161]: epprda is a PowerHA node name
cl_gethostbynode[313]: epprda is the CAA host matching PowerHA node epprda
clhaver[157]: node epprda resolves to epprda
clhaver[166]: cl_socket(COLLVER epprda epprda)
clhaver[191]: cl_connect(epprda)
clhaver[230]: read(epprda)
clhaver[624]: thread(epprds)
clhaver[144]: cl_gethostbynode epprds
cl_gethostbynode[102]: version 1.1 i_flag=0 given name is epprds
cl_gethostbynode[127]: cl_query nodes=2
cl_gethostbynode[161]: epprds is a PowerHA node name
cl_gethostbynode[313]: epprds is the CAA host matching PowerHA node epprds
clhaver[157]: node epprds resolves to epprds
clhaver[166]: cl_socket(COLLVER epprds epprds)
clhaver[191]: cl_connect(epprds)
clhaver[230]: read(epprds)
epprda: :cl_rsh[99] version=1.4
epprda: :cl_rsh[102] CAA_node_name=''
epprda: :cl_rsh[105] : Process optional flags
epprda: :cl_rsh[107] cmd_flag=-n
epprda: :cl_rsh[108] [[ -n == -n ]]
epprda: :cl_rsh[111] : Remove the no standard input flag
epprda: :cl_rsh[113] shift
epprda: :cl_rsh[124] : Pick up and check the input
epprda: :cl_rsh[126] print 'epprda /usr/es/sbin/cluster/cspoc/cexec eval gdgmgdhehcgmcacnhehfgogfcacnhicahdgjhegffpgngfhcghgffphagpgmgjgdhj'
epprda: :cl_rsh[126] read destination command
epprda: :cl_rsh[127] [[ -z epprda ]]
epprda: :cl_rsh[127] [[ -z '/usr/es/sbin/cluster/cspoc/cexec eval gdgmgdhehcgmcacnhehfgogfcacnhicahdgjhegffpgngfhcghgffphagpgmgjgdhj' ]]
epprda: :cl_rsh[136] /usr/es/sbin/cluster/utilities/cl_nn2hn epprda
epprda: :cl_nn2hn[83] version=1.11
epprda: :cl_nn2hn[86] CAA_host_name=''
epprda: :cl_nn2hn[86] typeset CAA_host_name
epprda: :cl_nn2hn[87] node_name=''
epprda: :cl_nn2hn[87] typeset node_name
epprda: :cl_nn2hn[88] node_interfaces=''
epprda: :cl_nn2hn[88] typeset node_interfaces
epprda: :cl_nn2hn[89] COMM_PATH=''
epprda: :cl_nn2hn[89] typeset COMM_PATH
epprda: :cl_nn2hn[90] r_flag=''
epprda: :cl_nn2hn[90] typeset r_flag
epprda: :cl_nn2hn[93] : Pick up and check the input
epprda: :cl_nn2hn[95] getopts r option
epprda: :cl_nn2hn[106] : Pick up the destination, which follows the options
epprda: :cl_nn2hn[108] shift 0
epprda: :cl_nn2hn[109] destination=epprda
epprda: :cl_nn2hn[109] typeset destination
epprda: :cl_nn2hn[111] [[ -z epprda ]]
epprda: :cl_nn2hn[121] : In order to prevent recursion, first you must prevent recursion...
epprda: :cl_nn2hn[123] [[ '' != TRUE ]]
epprda: :cl_nn2hn[126] : This routine is not being called from cl_query_hn_id, so call it
epprda: :cl_nn2hn[127] : to see if it can find the CAA host name based on a common short
epprda: :cl_nn2hn[128] : id, or match on CAA host name, or match on CAA short name, or
epprda: :cl_nn2hn[129] : similar match in /etc/cluster/rhosts.
epprda: :cl_nn2hn[131] cl_query_hn_id -q -i epprda
epprda: cl_query_hn_id[137]: version 1.2
epprda: cl_gethostbynode[102]: version 1.1 i_flag=105 given name is epprda
epprda: cl_gethostbynode[127]: cl_query nodes=2
epprda: cl_gethostbynode[161]: epprda is a PowerHA node name
epprda: cl_gethostbynode[313]: epprda is the CAA host matching PowerHA node epprda
epprda: :cl_nn2hn[131] CAA_host_name=epprda
epprda: :cl_nn2hn[132] RC=0
epprda: :cl_nn2hn[133] (( 0 == 0 ))
epprda: :cl_nn2hn[136] : The straight forward tests worked!
epprda: :cl_nn2hn[138] [[ epprda == @(+([0-9.])|+([0-9:])) ]]
epprda: :cl_nn2hn[159] [[ -z epprda ]]
epprda: :cl_nn2hn[340] [[ -z epprda ]]
epprda: :cl_nn2hn[345] [[ -n epprda ]]
epprda: :cl_nn2hn[348] : We have found epprda is our best guess at a CAA host name
epprda: :cl_nn2hn[349] : corresponding to epprda
epprda: :cl_nn2hn[351] print epprda
epprda: :cl_nn2hn[352] return 0
epprda: :cl_rsh[136] CAA_node_name=epprda
epprda: :cl_rsh[148] : Invoke clcomd
epprda: :cl_rsh[150] /usr/sbin/clrsh epprda -n '/usr/es/sbin/cluster/cspoc/cexec eval gdgmgdhehcgmcacnhehfgogfcacnhicahdgjhegffpgngfhcghgffphagpgmgjgdhj'
epprda: :cl_rsh[151] return 0
epprds: :cl_rsh[99] version=1.4
epprds: :cl_rsh[102] CAA_node_name=''
epprds: :cl_rsh[105] : Process optional flags
epprds: :cl_rsh[107] cmd_flag=-n
epprds: :cl_rsh[108] [[ -n == -n ]]
epprds: :cl_rsh[111] : Remove the no standard input flag
epprds: :cl_rsh[113] shift
epprds: :cl_rsh[124] : Pick up and check the input
epprds: :cl_rsh[126] read destination command
epprds: :cl_rsh[126] print 'epprds /usr/es/sbin/cluster/cspoc/cexec eval gdgmgdhehcgmcacnhehfgogfcacnhicahdgjhegffpgngfhcghgffphagpgmgjgdhj'
epprds: :cl_rsh[127] [[ -z epprds ]]
epprds: :cl_rsh[127] [[ -z '/usr/es/sbin/cluster/cspoc/cexec eval gdgmgdhehcgmcacnhehfgogfcacnhicahdgjhegffpgngfhcghgffphagpgmgjgdhj' ]]
epprds: :cl_rsh[136] /usr/es/sbin/cluster/utilities/cl_nn2hn epprds
epprds: :cl_nn2hn[83] version=1.11
epprds: :cl_nn2hn[86] CAA_host_name=''
epprds: :cl_nn2hn[86] typeset CAA_host_name
epprds: :cl_nn2hn[87] node_name=''
epprds: :cl_nn2hn[87] typeset node_name
epprds: :cl_nn2hn[88] node_interfaces=''
epprds: :cl_nn2hn[88] typeset node_interfaces
epprds: :cl_nn2hn[89] COMM_PATH=''
epprds: :cl_nn2hn[89] typeset COMM_PATH
epprds: :cl_nn2hn[90] r_flag=''
epprds: :cl_nn2hn[90] typeset r_flag
epprds: :cl_nn2hn[93] : Pick up and check the input
epprds: :cl_nn2hn[95] getopts r option
epprds: :cl_nn2hn[106] : Pick up the destination, which follows the options
epprds: :cl_nn2hn[108] shift 0
epprds: :cl_nn2hn[109] destination=epprds
epprds: :cl_nn2hn[109] typeset destination
epprds: :cl_nn2hn[111] [[ -z epprds ]]
epprds: :cl_nn2hn[121] : In order to prevent recursion, first you must prevent recursion...
epprds: :cl_nn2hn[123] [[ '' != TRUE ]]
epprds: :cl_nn2hn[126] : This routine is not being called from cl_query_hn_id, so call it
epprds: :cl_nn2hn[127] : to see if it can find the CAA host name based on a common short
epprds: :cl_nn2hn[128] : id, or match on CAA host name, or match on CAA short name, or
epprds: :cl_nn2hn[129] : similar match in /etc/cluster/rhosts.
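
epprds now repeats the resolution just shown for epprda. The fast path both nodes take, in outline (flags exactly as traced; the fallback matching paths mentioned in the comments are not exercised here and are omitted):

# Sketch: map a PowerHA node name to its CAA host name, per cl_nn2hn's
# fast path above. cl_query_hn_id -q -i does the actual matching.
destination=epprds
CAA_host_name=$(cl_query_hn_id -q -i $destination)
if (( $? == 0 )) && [[ -n $CAA_host_name ]]
then
    print $CAA_host_name     # best guess at the CAA host for $destination
fi
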
epprds: :cl_nn2hn[131] cl_query_hn_id -q -i epprds
epprds: cl_query_hn_id[137]: version 1.2
epprds: cl_gethostbynode[102]: version 1.1 i_flag=105 given name is epprds
epprds: cl_gethostbynode[127]: cl_query nodes=2
epprds: cl_gethostbynode[161]: epprds is a PowerHA node name
epprds: cl_gethostbynode[313]: epprds is the CAA host matching PowerHA node epprds
epprds: :cl_nn2hn[131] CAA_host_name=epprds
epprds: :cl_nn2hn[132] RC=0
epprds: :cl_nn2hn[133] (( 0 == 0 ))
epprds: :cl_nn2hn[136] : The straight forward tests worked!
epprds: :cl_nn2hn[138] [[ epprds == @(+([0-9.])|+([0-9:])) ]]
epprds: :cl_nn2hn[159] [[ -z epprds ]]
epprds: :cl_nn2hn[340] [[ -z epprds ]]
epprds: :cl_nn2hn[345] [[ -n epprds ]]
epprds: :cl_nn2hn[348] : We have found epprds is our best guess at a CAA host name
epprds: :cl_nn2hn[349] : corresponding to epprds
epprds: :cl_nn2hn[351] print epprds
epprds: :cl_nn2hn[352] return 0
epprds: :cl_rsh[136] CAA_node_name=epprds
epprds: :cl_rsh[148] : Invoke clcomd
epprds: :cl_rsh[150] /usr/sbin/clrsh epprds -n '/usr/es/sbin/cluster/cspoc/cexec eval gdgmgdhehcgmcacnhehfgogfcacnhicahdgjhegffpgngfhcghgffphagpgmgjgdhj'
epprds: :cl_rsh[151] return 0
:cl_cfg_sm_rt[906] [[ h != h ]]
:cl_cfg_sm_rt[919] RSCT_START_RETRIES=0
:cl_cfg_sm_rt[919] typeset -li RSCT_START_RETRIES
:cl_cfg_sm_rt[920] MIN_RSCT_RETRIES=1
:cl_cfg_sm_rt[920] typeset -li MIN_RSCT_RETRIES
:cl_cfg_sm_rt[921] MAX_RSCT_RETRIES=15
:cl_cfg_sm_rt[921] typeset -li MAX_RSCT_RETRIES
:cl_cfg_sm_rt[922] grep ^RSCT_START_RETRIES /etc/environment
:cl_cfg_sm_rt[922] eval
:cl_cfg_sm_rt[923] (( 0 < 1 ))
:cl_cfg_sm_rt[923] RSCT_START_RETRIES=1
:cl_cfg_sm_rt[924] (( 1 > 15 ))
:cl_cfg_sm_rt[926] RSCT_TB_WAITTIME=0
:cl_cfg_sm_rt[926] typeset -li RSCT_TB_WAITTIME
:cl_cfg_sm_rt[927] grep ^RSCT_TB_WAITTIME /etc/environment
:cl_cfg_sm_rt[927] eval
:cl_cfg_sm_rt[928] (( 0 <= 0 ))
:cl_cfg_sm_rt[928] RSCT_TB_WAITTIME=30
:cl_cfg_sm_rt[930] RSCT_START_WAIT=0
:cl_cfg_sm_rt[930] typeset -li RSCT_START_WAIT
:cl_cfg_sm_rt[931] MIN_RSCT_WAIT=10
:cl_cfg_sm_rt[931] typeset -li MIN_RSCT_WAIT
:cl_cfg_sm_rt[932] MAX_RSCT_WAIT=60
:cl_cfg_sm_rt[932] typeset -li MAX_RSCT_WAIT
:cl_cfg_sm_rt[933] grep ^RSCT_START_WAIT /etc/environment
:cl_cfg_sm_rt[933] eval
:cl_cfg_sm_rt[934] (( 0 < 10 ))
:cl_cfg_sm_rt[934] RSCT_START_WAIT=10
:cl_cfg_sm_rt[935] (( 10 > 60 ))
:cl_cfg_sm_rt[937] (( retries=0))
:cl_cfg_sm_rt[937] (( 0 < 1))
:cl_cfg_sm_rt[939] lsrsrc IBM.PeerNode
:cl_cfg_sm_rt[939] 1>> /var/hacmp/log/clutils.log 2>& 1
:cl_cfg_sm_rt[941] break
:cl_cfg_sm_rt[947] (( 0 >= 1 ))
:cl_cfg_sm_rt[954] : Configure RSCT in accordance with the specified or defaulted policies
:cl_cfg_sm_rt[955] : for Split
:cl_cfg_sm_rt[965] CT_MANAGEMENT_SCOPE=2
:cl_cfg_sm_rt[965] export CT_MANAGEMENT_SCOPE
:cl_cfg_sm_rt[966] lsrsrc -t -c -x IBM.PeerNode OpQuorumTieBreaker
:cl_cfg_sm_rt[966] Current_TB='"Success" '
:cl_cfg_sm_rt[967] Current_TB='"Success'
:cl_cfg_sm_rt[968] Current_TB=Success
:cl_cfg_sm_rt[969] [[ None == None ]]
:cl_cfg_sm_rt[971] [[ Success == Success ]]
:cl_cfg_sm_rt[973] chrsrc -c IBM.PeerNode OpQuorumTieBreaker=Operator
:cl_cfg_sm_rt[974] src_rc=0
:cl_cfg_sm_rt[975] (( 0 != 0 ))
:cl_cfg_sm_rt[981] (( 0 == 0 ))
:cl_cfg_sm_rt[983] chrsrc -s Name='="Success"' IBM.TieBreaker PostReserveWaitTime=30
:cl_cfg_sm_rt[984] src_rc=0
:cl_cfg_sm_rt[985] (( 0 != 0 ))
:cl_cfg_sm_rt[990] chrsrc -c IBM.PeerNode OpQuorumTieBreaker=Success
:cl_cfg_sm_rt[991] src_rc=0
:cl_cfg_sm_rt[992] (( 0 != 0 ))
:cl_cfg_sm_rt[1044] src_rc=0
:cl_cfg_sm_rt[1045] (( 0 != 0 ))
:cl_cfg_sm_rt[1053] : Configure RSCT Action
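
The records that follow apply the Action=Reboot policy as RSCT class-level attributes, using the same chrsrc -c pattern as the tiebreaker handling above. As a sketch, with the values the trace applies (CT_MANAGEMENT_SCOPE=2 selects peer-domain scope for the RSCT commands):

# Sketch: RSCT policy settings applied for Action=Reboot, per the trace below.
export CT_MANAGEMENT_SCOPE=2
chrsrc -c IBM.PeerNode QuorumType=4
chrsrc -c IBM.PeerNode CriticalMode=2
chrsrc -c IBM.PeerNode CritRsrcProtMethod=1   # protection method used when Action is Reboot
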
:cl_cfg_sm_rt[1055] chrsrc -c IBM.PeerNode QuorumType=4
:cl_cfg_sm_rt[1056] src_rc=0
:cl_cfg_sm_rt[1057] (( 0 != 0 ))
:cl_cfg_sm_rt[1064] chrsrc -c IBM.PeerNode CriticalMode=2
:cl_cfg_sm_rt[1065] src_rc=0
:cl_cfg_sm_rt[1066] (( 0 != 0 ))
:cl_cfg_sm_rt[1073] [[ Reboot == Reboot ]]
:cl_cfg_sm_rt[1075] chrsrc -c IBM.PeerNode CritRsrcProtMethod=1
:cl_cfg_sm_rt[1077] src_rc=0
:cl_cfg_sm_rt[1078] (( 0 != 0 ))
:cl_cfg_sm_rt[1086] : Configure RSCT Critical Resource Daemon Grace Period for cluster level.
:cl_cfg_sm_rt[1088] typeset grace_period
:cl_cfg_sm_rt[1089] clodmget -f crit_daemon_restart_grace_period HACMPcluster
:cl_cfg_sm_rt[1089] grace_period=60
:cl_cfg_sm_rt[1090] lsrsrc -c IBM.PeerNode
:cl_cfg_sm_rt[1090] LC_ALL=C
:cl_cfg_sm_rt[1090] grep CritDaemonRestartGracePeriod
:cl_cfg_sm_rt[1090] awk -F= '{print $2}'
:cl_cfg_sm_rt[1090] rsct_grace_period=' -1'
:cl_cfg_sm_rt[1091] [[ -n ' -1' ]]
:cl_cfg_sm_rt[1092] (( -1 != 60 ))
:cl_cfg_sm_rt[1093] chrsrc -c IBM.PeerNode CritDaemonRestartGracePeriod=60
:cl_cfg_sm_rt[1093] LC_ALL=C
:cl_cfg_sm_rt[1094] chrsrc_rc=0
:cl_cfg_sm_rt[1095] (( 0 != 0 ))
:cl_cfg_sm_rt[1104] : Configure RSCT Critical Resource Daemon Grace Period for node level.
:cl_cfg_sm_rt[1106] typeset node_grace_period
:cl_cfg_sm_rt[1107] typeset node_list
:cl_cfg_sm_rt[1108] typeset rsct_node_grace_period
:cl_cfg_sm_rt[1110] : Get the CAA active nodes list
:cl_cfg_sm_rt[1112] lscluster -m
:cl_cfg_sm_rt[1112] grep -p 'State of node: UP'
:cl_cfg_sm_rt[1112] cut -f2 -d:
:cl_cfg_sm_rt[1112] grep -w 'Node name:'
:cl_cfg_sm_rt[1112] node_list=$' epprda\n epprds'
:cl_cfg_sm_rt[1115] clodmget -n -q object='COMMUNICATION_PATH and value=epprda' -f name HACMPnode
:cl_cfg_sm_rt[1115] host_name=epprda
:cl_cfg_sm_rt[1116] clodmget -n -q object='CRIT_DAEMON_RESTART_GRACE_PERIOD and name=epprda' -f value HACMPnode
:cl_cfg_sm_rt[1116] node_grace_period=''
:cl_cfg_sm_rt[1117] [[ -n '' ]]
:cl_cfg_sm_rt[1115] clodmget -n -q object='COMMUNICATION_PATH and value=epprds' -f name HACMPnode
:cl_cfg_sm_rt[1115] host_name=epprds
:cl_cfg_sm_rt[1116] clodmget -n -q object='CRIT_DAEMON_RESTART_GRACE_PERIOD and name=epprds' -f value HACMPnode
:cl_cfg_sm_rt[1116] node_grace_period=''
:cl_cfg_sm_rt[1117] [[ -n '' ]]
:cl_cfg_sm_rt[1134] : Success exit. Display the CAA and RSCT configuration
:cl_cfg_sm_rt[1136] clctrl -tune -a
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).communication_mode = u
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).config_timeout = 240
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).deadman_mode = a
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).dr_enabled = 1
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).link_timeout = 30000
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).local_merge_policy = h
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).network_fdt = 20000
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).no_if_traffic_monitor = 0
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).node_down_delay = 10000
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).node_timeout = 30000
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).packet_ttl = 32
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).remote_hb_factor = 1
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).repos_mode = e
epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).site_merge_policy = h
:cl_cfg_sm_rt[1137] lscluster -m
Calling node query for all nodes...
Node query number of nodes examined: 2

Node name: epprda
Cluster shorthand id for node: 1
UUID for node: f42873b8-9ee2-11ed-8018-fae6134ea920
State of node: UP NODE_LOCAL
Reason: NONE
Smoothed rtt to node: 0
Mean Deviation in network rtt to node: 0
Number of clusters node is a member in: 1
CLUSTER NAME      SHID   UUID
epprda_cluster    0      f43c91c2-9ee2-11ed-8018-fae6134ea920
SITE NAME         SHID   UUID
LOCAL             1      51735173-5173-5173-5173-517351735173

Points of contact for node: 0

----------------------------------------------------------------------------

Node name: epprds
Cluster shorthand id for node: 2
UUID for node: f42873fe-9ee2-11ed-8018-fae6134ea920
State of node: UP
Reason: NONE
Smoothed rtt to node: 154
Mean Deviation in network rtt to node: 225
Number of clusters node is a member in: 1
CLUSTER NAME      SHID   UUID
epprda_cluster    0      f43c91c2-9ee2-11ed-8018-fae6134ea920
SITE NAME         SHID   UUID
LOCAL             1      51735173-5173-5173-5173-517351735173

Points of contact for node: 1

-----------------------------------------------------------------------
Interface     State  Protocol  Status  SRC_IP->DST_IP
-----------------------------------------------------------------------
tcpsock->02   UP     IPv4      none    61.81.244.134->61.81.244.123

:cl_cfg_sm_rt[1138] lsrsrc -x -A b IBM.PeerNode
resource 1:
    Name = "epprds"
    NodeList = {2}
    RSCTVersion = "3.2.6.4"
    ClassVersions = {}
    CritRsrcProtMethod = 0
    IsQuorumNode = 1
    IsPreferredGSGL = 1
    NodeUUID = "f42873fe-9ee2-11ed-8018-fae6134ea920"
    HostName = "epprds"
    TBPriority = 0
    CritDaemonRestartGracePeriod = -1
    ActivePeerDomain = "epprda_cluster"
    NodeNameList = {"epprds"}
    OpState = 1
    ConfigChanged = 1
    CritRsrcActive = 0
    OpUsabilityState = 1
    MaintenanceState = 0
resource 2:
    Name = "epprda"
    NodeList = {1}
    RSCTVersion = "3.2.6.4"
    ClassVersions = {}
    CritRsrcProtMethod = 0
    IsQuorumNode = 1
    IsPreferredGSGL = 1
    NodeUUID = "f42873b8-9ee2-11ed-8018-fae6134ea920"
    HostName = "epprda"
    TBPriority = 0
    CritDaemonRestartGracePeriod = -1
    ActivePeerDomain = "epprda_cluster"
    NodeNameList = {"epprda"}
    OpState = 1
    ConfigChanged = 1
    CritRsrcActive = 0
    OpUsabilityState = 1
    MaintenanceState = 0
:cl_cfg_sm_rt[1139] lsrsrc -x -c -A b IBM.PeerNode
resource 1:
    CommittedRSCTVersion = "3.2.2.0"
    ActiveVersionChanging = 0
    OpQuorumOverride = 0
    CritRsrcProtMethod = 1
    OpQuorumTieBreaker = "Success"
    QuorumType = 4
    QuorumGroupName = ""
    Fanout = 32
    OpFenceGroup = ""
    NodeCleanupCommand = ""
    NodeCleanupCriteria = ""
    QuorumLessStartupTimeout = 120
    CriticalMode = 2
    NotifyQuorumChangedCommand = ""
    NamePolicy = 1
    LiveUpdateOptions = ""
    QuorumNotificationRespWaitTime = 0
    MaintenanceModeConfig = ""
    CritDaemonRestartGracePeriod = 60
:cl_cfg_sm_rt[1141] return 0
:cl_cfg_sm_rt[1] on_exit 0
:node_up[441] : exit status of cl_cfg_sm_rt is 0
:node_up[498] : Enable NFS crossmounts during manual start
:node_up[500] [[ -n false ]]
:node_up[500] [[ false == true ]]
:node_up[607] : When RG dependencies are not configured we call node_up_local/remote,
:node_up[608] : followed by process_resources to process any remaining groups
:node_up[610] [[ TRUE == FALSE ]]
:node_up[657] [[ epprda == epprda ]]
:node_up[660] : Perform any deferred TCP daemon startup, if necessary,
:node_up[661] : along with any necessary start up of iSCSI devices.
:node_up[663] cl_telinit
:cl_telinit[178] version=%I%
:cl_telinit[182] TELINIT_FILE=/usr/es/sbin/cluster/.telinit
:cl_telinit[183] USE_TELINIT_FILE=/usr/es/sbin/cluster/.use_telinit
:cl_telinit[185] [[ -f /usr/es/sbin/cluster/.use_telinit ]]
:cl_telinit[189] USE_TELINIT=0
:cl_telinit[198] [[ '' == -boot ]]
:cl_telinit[236] cl_lsitab clinit
:cl_telinit[236] 1> /dev/null 2>& 1
:cl_telinit[239] : telinit a disabled
:cl_telinit[241] return 0
:node_up[664] : exit status of cl_telinit is: 0
:node_up[667] return 0

Jan 28 2023 17:10:31 EVENT COMPLETED: node_up epprda 0

|2023-01-28T17:10:31|28697|EVENT COMPLETED: node_up epprda 0|
+ clcycle clavailability.log
+ 1> /dev/null 2>& 1
+ cltime
+ DATE=2023-01-28T17:10:31.539193
+ echo '|2023-01-28T17:10:31.539193|INFO: node_up|epprda|0'
+ 1>> /var/hacmp/availability/clavailability.log

Jan 28 2023 17:10:33 EVENT START: rg_move_fence epprda 1

|2023-01-28T17:10:33|28698|EVENT START: rg_move_fence epprda 1|
+ clcycle clavailability.log
+ 1> /dev/null 2>& 1
+ cltime
+ DATE=2023-01-28T17:10:33.744597
+ echo '|2023-01-28T17:10:33.744597|INFO: rg_move_fence|epprd_rg|epprda|1'
+ 1>> /var/hacmp/availability/clavailability.log
:rg_move_fence[62] [[ high == high ]]
:rg_move_fence[62] version=1.11
:rg_move_fence[63] NODENAME=epprda
:rg_move_fence[63] export NODENAME
:rg_move_fence[65] set -u
:rg_move_fence[67] [ 2 != 2 ]
:rg_move_fence[73] set +u
:rg_move_fence[75] [[ -z TRUE ]]
:rg_move_fence[80] [[ TRUE == TRUE ]]
:rg_move_fence[82] LOCAL_NODENAME=epprda
:rg_move_fence[83] odmget -qid=1 HACMPgroup
:rg_move_fence[83] egrep 'group ='
:rg_move_fence[83] awk '{print $3}'
:rg_move_fence[83] eval RGNAME='"epprd_rg"'
:rg_move_fence[1] RGNAME=epprd_rg
+epprd_rg:rg_move_fence[84] GROUPNAME=epprd_rg
+epprd_rg:rg_move_fence[85] group_state='$RESGRP_epprd_rg_epprda'
+epprd_rg:rg_move_fence[86] set +u
+epprd_rg:rg_move_fence[87] eval print '$RESGRP_epprd_rg_epprda'
+epprd_rg:rg_move_fence[1] print
+epprd_rg:rg_move_fence[87] RG_MOVE_ONLINE=''
+epprd_rg:rg_move_fence[87] export RG_MOVE_ONLINE
+epprd_rg:rg_move_fence[88] set -u
+epprd_rg:rg_move_fence[89] RG_MOVE_ONLINE=TMP_ERROR
+epprd_rg:rg_move_fence[91] set -a
+epprd_rg:rg_move_fence[92] clsetenvgrp epprda rg_move epprd_rg ''
:clsetenvgrp[+49] [[ high = high ]]
:clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$
:clsetenvgrp[+51] usingVer=clSetenvgrp
:clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg
executing clSetenvgrp
clSetenvgrp completed successfully
:clsetenvgrp[+57] exit 0
+epprd_rg:rg_move_fence[92] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n '
+epprd_rg:rg_move_fence[93] RC=0
+epprd_rg:rg_move_fence[94] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n '
+epprd_rg:rg_move_fence[1] FORCEDOWN_GROUPS=''
+epprd_rg:rg_move_fence[2] RESOURCE_GROUPS=''
+epprd_rg:rg_move_fence[3] HOMELESS_GROUPS=''
+epprd_rg:rg_move_fence[4] HOMELESS_FOLLOWER_GROUPS=''
+epprd_rg:rg_move_fence[5] ERRSTATE_GROUPS=''
+epprd_rg:rg_move_fence[6] PRINCIPAL_ACTIONS=''
+epprd_rg:rg_move_fence[7] ASSOCIATE_ACTIONS=''
+epprd_rg:rg_move_fence[8] AUXILLIARY_ACTIONS=''
+epprd_rg:rg_move_fence[8] SIBLING_GROUPS=''
+epprd_rg:rg_move_fence[9] SIBLING_NODES_BY_GROUP=''
+epprd_rg:rg_move_fence[10] SIBLING_ACQUIRING_GROUPS=''
+epprd_rg:rg_move_fence[11] SIBLING_ACQUIRING_NODES_BY_GROUP=''
+epprd_rg:rg_move_fence[12] SIBLING_RELEASING_GROUPS=''
+epprd_rg:rg_move_fence[13] SIBLING_RELEASING_NODES_BY_GROUP=''
+epprd_rg:rg_move_fence[95] set +a
+epprd_rg:rg_move_fence[96] [ 0 -ne 0 ]
+epprd_rg:rg_move_fence[103] process_resources FENCE
:rg_move_fence[3318] version=1.169
:rg_move_fence[3321] STATUS=0
:rg_move_fence[3322] sddsrv_off=FALSE
:rg_move_fence[3324] true
:rg_move_fence[3326] : call rgpa, and it will tell us what to do next
:rg_move_fence[3328] set -a
:rg_move_fence[3329] clRGPA FENCE
:clRGPA[+47] [[ high = high ]]
:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$
:clRGPA[+49] usingVer=clrgpa
:clRGPA[+54] clrgpa FENCE
2023-01-28T17:10:33.848077 clrgpa
:clRGPA[+55] exit 0
:rg_move_fence[3329] eval JOB_TYPE=NONE
:rg_move_fence[1] JOB_TYPE=NONE
:rg_move_fence[3330] RC=0
:rg_move_fence[3331] set +a
:rg_move_fence[3333] (( 0 != 0 ))
:rg_move_fence[3342] RESOURCE_GROUPS=''
:rg_move_fence[3343] GROUPNAME=''
:rg_move_fence[3343] export GROUPNAME
:rg_move_fence[3353] IS_SERVICE_START=1
:rg_move_fence[3354] IS_SERVICE_STOP=1
:rg_move_fence[3360] [[ NONE == RELEASE ]]
:rg_move_fence[3360] [[ NONE == ONLINE ]]
:rg_move_fence[3729] break
:rg_move_fence[3740] : If sddsrv was turned off above, turn it back on again
:rg_move_fence[3742] [[ FALSE == TRUE ]]
:rg_move_fence[3747] exit 0
+epprd_rg:rg_move_fence[104] : exit status of process_resources FENCE is: 0
+epprd_rg:rg_move_fence[107] [[ TRUE == TRUE ]]
+epprd_rg:rg_move_fence[109] export EVENT_TYPE
+epprd_rg:rg_move_fence[110] echo ACQUIRE_PRIMARY
ACQUIRE_PRIMARY
+epprd_rg:rg_move_fence[111] [[ -n '' ]]
+epprd_rg:rg_move_fence[141] exit 0
Jan 28 2023 17:10:33 EVENT COMPLETED: rg_move_fence epprda 1 0
|2023-01-28T17:10:33|28698|EVENT COMPLETED: rg_move_fence epprda 1 0|
+ clcycle clavailability.log
+ 1> /dev/null 2>& 1
+ cltime
+ DATE=2023-01-28T17:10:33.940449
+ echo '|2023-01-28T17:10:33.940449|INFO: rg_move_fence|epprd_rg|epprda|1|0'
+ 1>> /var/hacmp/availability/clavailability.log
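Throughout these events the resource-group environment is imported with the idiom rg_move_fence just used: run clsetenvgrp under set -a so that every variable assigned by the eval is automatically exported to the scripts that follow. A condensed sketch of the pattern (the shipped script also logs and branches on the return code):

    set -a                                     # auto-export all assignments
    clsetenvgrp_output=$(clsetenvgrp epprda rg_move epprd_rg '')
    RC=$?
    eval "$clsetenvgrp_output"                 # FORCEDOWN_GROUPS="" RESOURCE_GROUPS="" ...
    set +a                                     # stop auto-exporting
    [ $RC -ne 0 ] && exit 1                    # sketch; real error handling differs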
Jan 28 2023 17:10:34 EVENT START: rg_move_acquire epprda 1
|2023-01-28T17:10:34|28698|EVENT START: rg_move_acquire epprda 1|
+ clcycle clavailability.log
+ 1> /dev/null 2>& 1
+ cltime
+ DATE=2023-01-28T17:10:34.134605
+ echo '|2023-01-28T17:10:34.134605|INFO: rg_move_acquire|epprd_rg|epprda|1'
+ 1>> /var/hacmp/availability/clavailability.log
:rg_move_acquire[+54] [[ high == high ]]
:rg_move_acquire[+54] version=1.9.1.7
:rg_move_acquire[+57] set -u
:rg_move_acquire[+59] [ 2 != 2 ]
:rg_move_acquire[+65] set +u
:rg_move_acquire[+67]
:rg_move_acquire[+67] clodmget -n -q id=1 -f group HACMPgroup
RG=epprd_rg
:rg_move_acquire[+68] export RG
:rg_move_acquire[+70] [[ ACQUIRE_PRIMARY == ACQUIRE_PRIMARY ]]
:rg_move_acquire[+75] typeset -i anhp_ret=0
:rg_move_acquire[+76] typeset -i scsi_ret=0
:rg_move_acquire[+78] clodmget -n -q policy = anhp -f value HACMPsplitmerge
:rg_move_acquire[+78] typeset ANHP_ENABLED=
:rg_move_acquire[+78] [[ == Yes ]]
:rg_move_acquire[+87] clodmget -n -q policy = scsi -f value HACMPsplitmerge
:rg_move_acquire[+87] typeset SCSIPR_ENABLED=
:rg_move_acquire[+87] [[ == Yes ]]
:rg_move_acquire[+106] (( 0 == 1 && 0 == 1 ))
:rg_move_acquire[+109] (( 0 == 1 && 0 == 0 ))
:rg_move_acquire[+112] (( 0 == 1 && 0 == 0 ))
:rg_move_acquire[+118] clcallev rg_move epprda 1 ACQUIRE
Jan 28 2023 17:10:34 EVENT START: rg_move epprda 1 ACQUIRE
|2023-01-28T17:10:34|28698|EVENT START: rg_move epprda 1 ACQUIRE|
:clevlog[amlog_trace:318] clcycle clavailability.log
:clevlog[amlog_trace:318] 1> /dev/null 2>& 1
:clevlog[amlog_trace:319] cltime
:clevlog[amlog_trace:319] DATE=2023-01-28T17:10:34.264066
:clevlog[amlog_trace:320] echo '|2023-01-28T17:10:34.264066|INFO: rg_move|epprd_rg|epprda|1|ACQUIRE'
:clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
:get_local_nodename[48] version=1.2.1.28
:get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster
:get_local_nodename[54] ODMDIR=/etc/es/objrepos
:get_local_nodename[54] export ODMDIR
:get_local_nodename[55] nodename=''
:get_local_nodename[55] typeset nodename
:get_local_nodename[56] cllsclstr -N
:get_local_nodename[56] nodename=epprda
:get_local_nodename[57] rc=0
:get_local_nodename[57] typeset -i rc
:get_local_nodename[58] (( 0 != 0 ))
:get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done.
:get_local_nodename[63] clnodename
:get_local_nodename[63] grep -w epprda
:get_local_nodename[63] [[ -n epprda ]]
:get_local_nodename[65] print -- epprda
:get_local_nodename[66] exit 0
:rg_move[76] version=%I%
:rg_move[86] STATUS=0
:rg_move[88] [[ ! -n '' ]]
:rg_move[90] EMULATE=REAL
:rg_move[96] set -u
:rg_move[98] NODENAME=epprda
:rg_move[98] export NODENAME
:rg_move[99] RGID=1
:rg_move[100] (( 3 == 3 ))
:rg_move[102] ACTION=ACQUIRE
:rg_move[108] : serial number for this event is 28698
:rg_move[112] RG_UP_POSTEVENT_ON_NODE=epprda
:rg_move[112] export RG_UP_POSTEVENT_ON_NODE
:rg_move[116] clodmget -qid=1 -f group -n HACMPgroup
:rg_move[116] eval RGNAME=epprd_rg
:rg_move[1] RGNAME=epprd_rg
:rg_move[118] UPDATESTATD=0
:rg_move[119] export UPDATESTATD
:rg_move[123] RG_MOVE_EVENT=true
:rg_move[123] export RG_MOVE_EVENT
:rg_move[128] group_state='$RESGRP_epprd_rg_epprda'
:rg_move[129] set +u
:rg_move[130] eval print '$RESGRP_epprd_rg_epprda'
:rg_move[1] print
:rg_move[130] RG_MOVE_ONLINE=''
:rg_move[130] export RG_MOVE_ONLINE
:rg_move[131] set -u
:rg_move[132] RG_MOVE_ONLINE=TMP_ERROR
:rg_move[139] rm -f /tmp/.NFSSTOPPED
:rg_move[140] rm -f /tmp/.RPCLOCKDSTOPPED
:rg_move[147] set -a
:rg_move[148] clsetenvgrp epprda rg_move epprd_rg
:clsetenvgrp[+49] [[ high = high ]]
:clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$
:clsetenvgrp[+51] usingVer=clSetenvgrp
:clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg
executing clSetenvgrp
clSetenvgrp completed successfully
:clsetenvgrp[+57] exit 0
:rg_move[148] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n '
:rg_move[149] RC=0
:rg_move[150] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n '
:rg_move[1] FORCEDOWN_GROUPS=''
:rg_move[2] RESOURCE_GROUPS=''
:rg_move[3] HOMELESS_GROUPS=''
:rg_move[4] HOMELESS_FOLLOWER_GROUPS=''
:rg_move[5] ERRSTATE_GROUPS=''
:rg_move[6] PRINCIPAL_ACTIONS=''
:rg_move[7] ASSOCIATE_ACTIONS=''
:rg_move[8] AUXILLIARY_ACTIONS=''
:rg_move[8] SIBLING_GROUPS=''
:rg_move[9] SIBLING_NODES_BY_GROUP=''
:rg_move[10] SIBLING_ACQUIRING_GROUPS=''
:rg_move[11] SIBLING_ACQUIRING_NODES_BY_GROUP=''
:rg_move[12] SIBLING_RELEASING_GROUPS=''
:rg_move[13] SIBLING_RELEASING_NODES_BY_GROUP=''
:rg_move[151] set +a
:rg_move[155] (( 0 != 0 ))
:rg_move[155] [[ -z epprd_rg ]]
:rg_move[164] [[ -z TRUE ]]
:rg_move[241] AM_SYNC_CALLED_BY=RG_MOVE
:rg_move[241] export AM_SYNC_CALLED_BY
:rg_move[242] process_resources
:process_resources[3318] version=1.169
:process_resources[3321] STATUS=0
:process_resources[3322] sddsrv_off=FALSE
:process_resources[3324] true
:process_resources[3326] : call rgpa, and it will tell us what to do next
:process_resources[3328] set -a
:process_resources[3329] clRGPA
:clRGPA[+47] [[ high = high ]]
:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$
:clRGPA[+49] usingVer=clrgpa
:clRGPA[+54] clrgpa
2023-01-28T17:10:34.384412 clrgpa
:clRGPA[+55] exit 0
:process_resources[3329] eval JOB_TYPE=ACQUIRE RESOURCE_GROUPS='"epprd_rg"' PRINCIPAL_ACTION='"ACQUIRE"' AUXILLIARY_ACTION='"NONE"'
:process_resources[1] JOB_TYPE=ACQUIRE
:process_resources[1] RESOURCE_GROUPS=epprd_rg
:process_resources[1] PRINCIPAL_ACTION=ACQUIRE
:process_resources[1] AUXILLIARY_ACTION=NONE
:process_resources[3330] RC=0
:process_resources[3331] set +a
:process_resources[3333] (( 0 != 0 ))
:process_resources[3342] RESOURCE_GROUPS=epprd_rg
+epprd_rg:process_resources[3343] GROUPNAME=epprd_rg
+epprd_rg:process_resources[3343] export GROUPNAME
+epprd_rg:process_resources[3353] IS_SERVICE_START=1
+epprd_rg:process_resources[3354] IS_SERVICE_STOP=1
+epprd_rg:process_resources[3360] [[ ACQUIRE == RELEASE ]]
+epprd_rg:process_resources[3360] [[ ACQUIRE == ONLINE ]]
+epprd_rg:process_resources[3652] set_resource_group_state ACQUIRING
+epprd_rg:process_resources[set_resource_group_state:82] PS4_FUNC=set_resource_group_state
+epprd_rg:process_resources[set_resource_group_state:82] typeset PS4_FUNC
+epprd_rg:process_resources[set_resource_group_state:83] [[ high == high ]]
+epprd_rg:process_resources[set_resource_group_state:83] set -x
+epprd_rg:process_resources[set_resource_group_state:84] STAT=0
+epprd_rg:process_resources[set_resource_group_state:85] new_status=ACQUIRING
+epprd_rg:process_resources[set_resource_group_state:89] export GROUPNAME
+epprd_rg:process_resources[set_resource_group_state:90] [[ ACQUIRING != DOWN ]]
+epprd_rg:process_resources[set_resource_group_state:92] clchdaemons -d clstrmgr_scripts -t resource_locator -n epprda -o epprd_rg -v ACQUIRING
+epprd_rg:process_resources[set_resource_group_state:100] : Resource Manager Updates
+epprd_rg:process_resources[set_resource_group_state:105] amlog_trace '' 'acquire|epprd_rg|epprda'
+epprd_rg:process_resources[amlog_trace:318] clcycle clavailability.log
+epprd_rg:process_resources[amlog_trace:318] 1> /dev/null 2>& 1
+epprd_rg:process_resources[amlog_trace:319] cltime
+epprd_rg:process_resources[amlog_trace:319] DATE=2023-01-28T17:10:34.418340
+epprd_rg:process_resources[amlog_trace:320] echo '|2023-01-28T17:10:34.418340|INFO: acquire|epprd_rg|epprda'
+epprd_rg:process_resources[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
+epprd_rg:process_resources[set_resource_group_state:106] cl_RMupdate acquiring epprd_rg process_resources
2023-01-28T17:10:34.441217
2023-01-28T17:10:34.445821
+epprd_rg:process_resources[set_resource_group_state:153] return 0
+epprd_rg:process_resources[3324] true
+epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next
+epprd_rg:process_resources[3328] set -a
+epprd_rg:process_resources[3329] clRGPA
+epprd_rg:clRGPA[+47] [[ high = high ]]
+epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$
+epprd_rg:clRGPA[+49] usingVer=clrgpa
+epprd_rg:clRGPA[+54] clrgpa
2023-01-28T17:10:34.458184 clrgpa
+epprd_rg:clRGPA[+55] exit 0
+epprd_rg:process_resources[3329] eval JOB_TYPE=WPAR ACTION=ACQUIRE RESOURCE_GROUPS='"epprd_rg' '"'
+epprd_rg:process_resources[1] JOB_TYPE=WPAR
+epprd_rg:process_resources[1] ACTION=ACQUIRE
+epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg '
+epprd_rg:process_resources[3330] RC=0
+epprd_rg:process_resources[3331] set +a
+epprd_rg:process_resources[3333] (( 0 != 0 ))
+epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg
+epprd_rg:process_resources[3343] GROUPNAME=epprd_rg
+epprd_rg:process_resources[3343] export GROUPNAME
+epprd_rg:process_resources[3353] IS_SERVICE_START=1
+epprd_rg:process_resources[3354] IS_SERVICE_STOP=1
+epprd_rg:process_resources[3360] [[ WPAR == RELEASE ]]
+epprd_rg:process_resources[3360] [[ WPAR == ONLINE ]]
+epprd_rg:process_resources[3492] process_wpars ACQUIRE
+epprd_rg:process_resources[process_wpars:3265] PS4_FUNC=process_wpars
+epprd_rg:process_resources[process_wpars:3265] typeset PS4_FUNC
+epprd_rg:process_resources[process_wpars:3266] [[ high == high ]]
+epprd_rg:process_resources[process_wpars:3266] set -x
+epprd_rg:process_resources[process_wpars:3267] STAT=0
+epprd_rg:process_resources[process_wpars:3268] action=ACQUIRE
+epprd_rg:process_resources[process_wpars:3268] typeset action
+epprd_rg:process_resources[process_wpars:3272] export GROUPNAME
+epprd_rg:process_resources[process_wpars:3275] clstart_wpar
+epprd_rg:clstart_wpar[180] version=1.12.1.1
+epprd_rg:clstart_wpar[184] [[ rg_move == reconfig_resource_acquire ]]
+epprd_rg:clstart_wpar[184] [[ ACQUIRE_PRIMARY == reconfig_resource_acquire ]]
+epprd_rg:clstart_wpar[193] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource
+epprd_rg:clstart_wpar[193] [[ -z '' ]]
+epprd_rg:clstart_wpar[193] exit 0
+epprd_rg:process_resources[process_wpars:3276] RC=0
+epprd_rg:process_resources[process_wpars:3285] (( 0 != 0 && 0 != 11 ))
+epprd_rg:process_resources[process_wpars:3294] return 0
+epprd_rg:process_resources[3493] RC=0
+epprd_rg:process_resources[3495] [[ ACQUIRE == RELEASE ]]
+epprd_rg:process_resources[3324] true
+epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next
+epprd_rg:process_resources[3328] set -a
+epprd_rg:process_resources[3329] clRGPA
+epprd_rg:clRGPA[+47] [[ high = high ]]
+epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$
+epprd_rg:clRGPA[+49] usingVer=clrgpa
+epprd_rg:clRGPA[+54] clrgpa
2023-01-28T17:10:34.489575 clrgpa
+epprd_rg:clRGPA[+55] exit 0
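process_resources is essentially a dispatcher: each pass calls clRGPA, evals the NAME=value plan it prints, and branches on JOB_TYPE (this acquisition runs through ACQUIRE, WPAR, SERVICE_LABELS, then DISKS and VGS below, until NONE ends the loop). The shape of that loop, condensed from the trace:

    while true
    do
        set -a
        eval $(clRGPA)                 # sets JOB_TYPE, ACTION, RESOURCE_GROUPS, ...
        set +a
        case $JOB_TYPE in
            SERVICE_LABELS) acquire_service_labels ;;      # ACTION=ACQUIRE here
            DISKS)          get_disks_main ;;
            VGS)            process_volume_groups_main $ACTION ;;
            NONE)           break ;;   # the planner has nothing more to do
        esac
    done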
+epprd_rg:process_resources[3329] eval JOB_TYPE=SERVICE_LABELS ACTION=ACQUIRE IP_LABELS='"epprd"' RESOURCE_GROUPS='"epprd_rg' '"' COMMUNICATION_LINKS='""'
+epprd_rg:process_resources[1] JOB_TYPE=SERVICE_LABELS
+epprd_rg:process_resources[1] ACTION=ACQUIRE
+epprd_rg:process_resources[1] IP_LABELS=epprd
+epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg '
+epprd_rg:process_resources[1] COMMUNICATION_LINKS=''
+epprd_rg:process_resources[3330] RC=0
+epprd_rg:process_resources[3331] set +a
+epprd_rg:process_resources[3333] (( 0 != 0 ))
+epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg
+epprd_rg:process_resources[3343] GROUPNAME=epprd_rg
+epprd_rg:process_resources[3343] export GROUPNAME
+epprd_rg:process_resources[3353] IS_SERVICE_START=1
+epprd_rg:process_resources[3354] IS_SERVICE_STOP=1
+epprd_rg:process_resources[3360] [[ SERVICE_LABELS == RELEASE ]]
+epprd_rg:process_resources[3360] [[ SERVICE_LABELS == ONLINE ]]
+epprd_rg:process_resources[3407] [[ ACQUIRE == ACQUIRE ]]
+epprd_rg:process_resources[3409] acquire_service_labels
+epprd_rg:process_resources[acquire_service_labels:3083] PS4_FUNC=acquire_service_labels
+epprd_rg:process_resources[acquire_service_labels:3083] typeset PS4_FUNC
+epprd_rg:process_resources[acquire_service_labels:3084] [[ high == high ]]
+epprd_rg:process_resources[acquire_service_labels:3084] set -x
+epprd_rg:process_resources[acquire_service_labels:3085] STAT=0
+epprd_rg:process_resources[acquire_service_labels:3086] clcallev acquire_service_addr
Jan 28 2023 17:10:34 EVENT START: acquire_service_addr
|2023-01-28T17:10:34|28698|EVENT START: acquire_service_addr |
+epprd_rg:acquire_service_addr[416] version=1.74.1.5
+epprd_rg:acquire_service_addr[423] [[ SERVICE_LABELS != 0 ]]
+epprd_rg:acquire_service_addr[423] [[ SERVICE_LABELS != GROUP ]]
+epprd_rg:acquire_service_addr[424] PROC_RES=true
+epprd_rg:acquire_service_addr[440] saveNSORDER=UNDEFINED
+epprd_rg:acquire_service_addr[441] NSORDER=local
+epprd_rg:acquire_service_addr[442] export NSORDER
+epprd_rg:acquire_service_addr[445] cl_RMupdate resource_acquiring All_service_addrs acquire_service_addr
2023-01-28T17:10:34.568976
2023-01-28T17:10:34.573398
+epprd_rg:acquire_service_addr[452] export GROUPNAME
+epprd_rg:acquire_service_addr[458] [[ true == true ]]
+epprd_rg:acquire_service_addr[459] get_list_head epprd
+epprd_rg:acquire_service_addr[459] read SERVICELABELS
+epprd_rg:acquire_service_addr[460] get_list_tail epprd
+epprd_rg:acquire_service_addr[460] read IP_LABELS
+epprd_rg:acquire_service_addr[471] clgetif -a epprd
+epprd_rg:acquire_service_addr[471] 2> /dev/null
+epprd_rg:acquire_service_addr[472] (( 3 != 0 ))
+epprd_rg:acquire_service_addr[477] cllsif -J '~' -Sn epprd
+epprd_rg:acquire_service_addr[477] uniq
+epprd_rg:acquire_service_addr[477] cut -d~ -f3
+epprd_rg:acquire_service_addr[477] NETWORK=net_ether_01
+epprd_rg:acquire_service_addr[478] cllsif -J '~' -Si epprda
+epprd_rg:acquire_service_addr[478] sort
+epprd_rg:acquire_service_addr[478] awk -F~ -v NET=net_ether_01 '{if ($2 == "boot" && $3 == NET) print $1}'
+epprd_rg:acquire_service_addr[478] boot_list=epprda
+epprd_rg:acquire_service_addr[480] [[ -z epprda ]]
+epprd_rg:acquire_service_addr[492] best_boot_addr net_ether_01 epprda
+epprd_rg:acquire_service_addr[best_boot_addr:106] NETWORK=net_ether_01
+epprd_rg:acquire_service_addr[best_boot_addr:106] typeset NETWORK
+epprd_rg:acquire_service_addr[best_boot_addr:107] shift
+epprd_rg:acquire_service_addr[best_boot_addr:108] candidate_boots=epprda
+epprd_rg:acquire_service_addr[best_boot_addr:108] typeset candidate_boots
+epprd_rg:acquire_service_addr[best_boot_addr:112] echo epprda
+epprd_rg:acquire_service_addr[best_boot_addr:112] wc -l
+epprd_rg:acquire_service_addr[best_boot_addr:112] tr ' ' '\n'
+epprd_rg:acquire_service_addr[best_boot_addr:112] num_candidates=' 1'
+epprd_rg:acquire_service_addr[best_boot_addr:112] typeset -li num_candidates
+epprd_rg:acquire_service_addr[best_boot_addr:113] (( 1 == 1 ))
+epprd_rg:acquire_service_addr[best_boot_addr:114] echo epprda
+epprd_rg:acquire_service_addr[best_boot_addr:115] return
+epprd_rg:acquire_service_addr[492] boot_addr=epprda
+epprd_rg:acquire_service_addr[493] (( 0 != 0 ))
+epprd_rg:acquire_service_addr[505] clgetif -a epprda
+epprd_rg:acquire_service_addr[505] cut -f1
+epprd_rg:acquire_service_addr[505] 2> /dev/null
+epprd_rg:acquire_service_addr[505] INTERFACE='en0 '
+epprd_rg:acquire_service_addr[507] cllsif -J '~' -Sn epprda
+epprd_rg:acquire_service_addr[507] cut -f7,9 -d~
+epprd_rg:acquire_service_addr[508] read boot_dot_addr INTERFACE
+epprd_rg:acquire_service_addr[508] IFS='~'
+epprd_rg:acquire_service_addr[510] [[ -z en0 ]]
+epprd_rg:acquire_service_addr[527] cllsif -J '~' -Sn epprd
+epprd_rg:acquire_service_addr[527] cut -f7,11,15 -d~
+epprd_rg:acquire_service_addr[527] uniq
+epprd_rg:acquire_service_addr[528] read service_dot_addr NETMASK INET_FAMILY
+epprd_rg:acquire_service_addr[528] IFS='~'
+epprd_rg:acquire_service_addr[530] [[ AF_INET == AF_INET6 ]]
+epprd_rg:acquire_service_addr[534] cl_swap_IP_address rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0
+epprd_rg:cl_swap_IP_address[462] version=1.9.14.8
+epprd_rg:cl_swap_IP_address[464] cl_get_path -S
+epprd_rg:cl_swap_IP_address[464] OP_SEP='~'
+epprd_rg:cl_swap_IP_address[465] LC_ALL=C
+epprd_rg:cl_swap_IP_address[465] export LC_ALL
+epprd_rg:cl_swap_IP_address[466] RESTORE_ROUTES=/usr/es/sbin/cluster/.restore_routes
+epprd_rg:cl_swap_IP_address[468] cl_echo 33 'Starting execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0' /usr/es/sbin/cluster/events/utils/cl_swap_IP_address 'rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0'
Jan 28 2023 17:10:34Starting execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0
+epprd_rg:cl_swap_IP_address[470] typeset -i oslevel
+epprd_rg:cl_swap_IP_address[471] /usr/bin/sed s/-//g
+epprd_rg:cl_swap_IP_address[471] /usr/bin/oslevel -r
+epprd_rg:cl_swap_IP_address[471] oslevel=720005
+epprd_rg:cl_swap_IP_address[476] [[ 6 == 6 ]]
+epprd_rg:cl_swap_IP_address[477] [[ 6 == 7 ]]
+epprd_rg:cl_swap_IP_address[484] no -a
+epprd_rg:cl_swap_IP_address[484] awk '{ print $3 }'
+epprd_rg:cl_swap_IP_address[484] grep ipignoreredirects
+epprd_rg:cl_swap_IP_address[484] PRIOR_IPIGNORE_REDIRECTS_VALUE=0
+epprd_rg:cl_swap_IP_address[485] /usr/sbin/no -o ipignoreredirects=1
Setting ipignoreredirects to 1
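Before moving any address, cl_swap_IP_address suppresses ICMP redirect processing so stale redirects cannot perturb the routing table while the alias is being configured; the prior value is saved and put back at the end (line 989 later restores ipignoreredirects to 0). The save/set/restore shape, sketched:

    # Sketch: remember the tunable, force it on for the swap, restore after
    PRIOR=$(no -a | grep ipignoreredirects | awk '{ print $3 }')
    /usr/sbin/no -o ipignoreredirects=1
    # ... alias configuration and ARP flush happen here ...
    /usr/sbin/no -o ipignoreredirects=$PRIOR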
+epprd_rg:cl_swap_IP_address[490] PROC_RES=false
+epprd_rg:cl_swap_IP_address[491] [[ SERVICE_LABELS != 0 ]]
+epprd_rg:cl_swap_IP_address[491] [[ SERVICE_LABELS != GROUP ]]
+epprd_rg:cl_swap_IP_address[492] PROC_RES=true
+epprd_rg:cl_swap_IP_address[495] set -u
+epprd_rg:cl_swap_IP_address[497] RC=0
+epprd_rg:cl_swap_IP_address[504] netstat -in
Name  Mtu    Network     Address            Ipkts      Ierrs  Opkts      Oerrs  Coll
en0   1500   link#2      fa.e6.13.4e.a9.20  183694220  0      60659647   0      0
en0   1500   61.81.244   61.81.244.134      183694220  0      60659647   0      0
lo0   16896  link#1                         33645546   0      33645546   0      0
lo0   16896  127         127.0.0.1          33645546   0      33645546   0      0
lo0   16896  ::1%1                          33645546   0      33645546   0      0
+epprd_rg:cl_swap_IP_address[505] netstat -rnC
Routing tables
Destination      Gateway         Flags  Wt  Policy  If   Cost  Config_Cost

Route tree for Protocol Family 2 (Internet):
default          61.81.244.1     UG     1   -       en0  0     0
61.81.244.0      61.81.244.134   UHSb   1   -       en0  0     0 =>
61.81.244/24     61.81.244.134   U      1   -       en0  0     0
61.81.244.134    127.0.0.1       UGHS   1   -       lo0  0     0
61.81.244.255    61.81.244.134   UHSb   1   -       en0  0     0
127/8            127.0.0.1       U      1   -       lo0  0     0

Route tree for Protocol Family 24 (Internet v6):
::1%1            ::1%1           UH     1   -       lo0  0     0
+epprd_rg:cl_swap_IP_address[506] CASC_OR_ROT=rotating
+epprd_rg:cl_swap_IP_address[507] ACQ_OR_RLSE=acquire
+epprd_rg:cl_swap_IP_address[508] IF=en0
+epprd_rg:cl_swap_IP_address[509] ADDR=61.81.244.156
+epprd_rg:cl_swap_IP_address[510] OLD_ADDR=61.81.244.134
+epprd_rg:cl_swap_IP_address[511] NETMASK=255.255.255.0
+epprd_rg:cl_swap_IP_address[514] [[ rotating == cascading ]]
+epprd_rg:cl_swap_IP_address[525] cut -f3 -d~
+epprd_rg:cl_swap_IP_address[525] cllsif -J '~' -Sw -n 61.81.244.156
+epprd_rg:cl_swap_IP_address[525] NET=net_ether_01
+epprd_rg:cl_swap_IP_address[528] clodmget -qidentifier=61.81.244.156 -f max_aliases -n HACMPadapter
+epprd_rg:cl_swap_IP_address[528] ALIAS_FIRST=0
+epprd_rg:cl_swap_IP_address[529] grep -c -w inet
+epprd_rg:cl_swap_IP_address[529] ifconfig en0
+epprd_rg:cl_swap_IP_address[529] LC_ALL=C
+epprd_rg:cl_swap_IP_address[529] NUM_ADDRS=1
+epprd_rg:cl_swap_IP_address[530] [[ acquire == acquire ]]
+epprd_rg:cl_swap_IP_address[533] amlog_trace '' 'Aliasing Service IP|61.81.244.156'
+epprd_rg:cl_swap_IP_address[amlog_trace:318] clcycle clavailability.log
+epprd_rg:cl_swap_IP_address[amlog_trace:318] 1> /dev/null 2>& 1
+epprd_rg:cl_swap_IP_address[amlog_trace:319] cltime
+epprd_rg:cl_swap_IP_address[amlog_trace:319] DATE=2023-01-28T17:10:34.796215
+epprd_rg:cl_swap_IP_address[amlog_trace:320] echo '|2023-01-28T17:10:34.796215|INFO: Aliasing Service IP|61.81.244.156'
+epprd_rg:cl_swap_IP_address[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
+epprd_rg:cl_swap_IP_address[535] cl_echo 7310 'cl_swap_IP_address: Configuring network interface en0 with aliased IP address 61.81.244.156' cl_swap_IP_address en0 61.81.244.156
Jan 28 2023 17:10:34cl_swap_IP_address: Configuring network interface en0 with aliased IP address 61.81.244.156
+epprd_rg:cl_swap_IP_address[546] (( 1 > 1 ))
+epprd_rg:cl_swap_IP_address[550] clifconfig en0 alias 61.81.244.156 netmask 255.255.255.0 firstalias
+epprd_rg:clifconfig[117] version=1.9
+epprd_rg:clifconfig[121] set -A args en0 alias 61.81.244.156 netmask 255.255.255.0 firstalias
+epprd_rg:clifconfig[124] interface=en0
+epprd_rg:clifconfig[125] shift
+epprd_rg:clifconfig[127] [[ -n alias ]]
+epprd_rg:clifconfig[129] alias_val=1
+epprd_rg:clifconfig[167] shift
+epprd_rg:clifconfig[127] [[ -n 61.81.244.156 ]]
+epprd_rg:clifconfig[147] params=' address=61.81.244.156'
+epprd_rg:clifconfig[147] addr=61.81.244.156
+epprd_rg:clifconfig[167] shift
+epprd_rg:clifconfig[127] [[ -n netmask ]]
+epprd_rg:clifconfig[149] params=' address=61.81.244.156 netmask=255.255.255.0'
+epprd_rg:clifconfig[149] shift
+epprd_rg:clifconfig[167] shift
+epprd_rg:clifconfig[127] [[ -n firstalias ]]
+epprd_rg:clifconfig[167] shift
+epprd_rg:clifconfig[127] [[ -n '' ]]
+epprd_rg:clifconfig[174] [[ -n 1 ]]
+epprd_rg:clifconfig[174] [[ -n epprd_rg ]]
+epprd_rg:clifconfig[175] clwparname epprd_rg
+epprd_rg:clwparname[38] version=1.3.1.1
+epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource
+epprd_rg:clwparname[44] [[ -z '' ]]
+epprd_rg:clwparname[44] exit 0
+epprd_rg:clifconfig[175] WPARNAME=''
+epprd_rg:clifconfig[176] (( 0 == 0 ))
+epprd_rg:clifconfig[176] [[ -n '' ]]
+epprd_rg:clifconfig[218] belongs_to_an_active_wpar 61.81.244.156
+epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource
+epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]]
+epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1
+epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast
+epprd_rg:clifconfig[218] IFS='~'
+epprd_rg:clifconfig[219] rc=1
+epprd_rg:clifconfig[221] [[ 1 == 0 ]]
+epprd_rg:clifconfig[275] ifconfig en0 alias 61.81.244.156 netmask 255.255.255.0 firstalias
+epprd_rg:cl_swap_IP_address[584] hats_adapter_notify en0 -e 61.81.244.156 alias
2023-01-28T17:10:34.848377 hats_adapter_notify
2023-01-28T17:10:34.849275 hats_adapter_notify
+epprd_rg:cl_swap_IP_address[587] check_alias_status en0 61.81.244.156 acquire
+epprd_rg:cl_swap_IP_address[check_alias_status:108] CH_INTERFACE=en0
+epprd_rg:cl_swap_IP_address[check_alias_status:109] CH_ADDRESS=61.81.244.156
+epprd_rg:cl_swap_IP_address[check_alias_status:110] CH_ACQ_OR_RLSE=acquire
+epprd_rg:cl_swap_IP_address[check_alias_status:118] IF_IB=en0
+epprd_rg:cl_swap_IP_address[check_alias_status:120] echo en0
+epprd_rg:cl_swap_IP_address[check_alias_status:120] awk '{print index($0, "ib")}'
+epprd_rg:cl_swap_IP_address[check_alias_status:120] IS_IB=0
+epprd_rg:cl_swap_IP_address[check_alias_status:122] [[ 0 != 1 ]]
+epprd_rg:cl_swap_IP_address[check_alias_status:124] clifconfig en0
+epprd_rg:cl_swap_IP_address[check_alias_status:124] awk '{print $2}'
+epprd_rg:cl_swap_IP_address[check_alias_status:124] fgrep -w 61.81.244.156
+epprd_rg:clifconfig[117] version=1.9
+epprd_rg:clifconfig[121] set -A args en0
+epprd_rg:clifconfig[124] interface=en0
+epprd_rg:clifconfig[125] shift
+epprd_rg:clifconfig[127] [[ -n '' ]]
+epprd_rg:clifconfig[174] [[ -n '' ]]
+epprd_rg:clifconfig[218] belongs_to_an_active_wpar
+epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource
+epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]]
+epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1
+epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast
+epprd_rg:clifconfig[218] IFS='~'
+epprd_rg:clifconfig[219] rc=1
+epprd_rg:clifconfig[221] [[ 1 == 0 ]]
+epprd_rg:clifconfig[275] ifconfig en0
+epprd_rg:cl_swap_IP_address[check_alias_status:124] ADDR=61.81.244.156
+epprd_rg:cl_swap_IP_address[check_alias_status:129] [ acquire = acquire ]
+epprd_rg:cl_swap_IP_address[check_alias_status:133] [[ 61.81.244.156 != 61.81.244.156 ]]
+epprd_rg:cl_swap_IP_address[check_alias_status:144] return 0
+epprd_rg:cl_swap_IP_address[588] RC=0
+epprd_rg:cl_swap_IP_address[590] [[ 0 != 0 ]]
+epprd_rg:cl_swap_IP_address[594] amlog_trace '' 'Aliasing Service IP|61.81.244.156'
+epprd_rg:cl_swap_IP_address[amlog_trace:318] clcycle clavailability.log
+epprd_rg:cl_swap_IP_address[amlog_trace:318] 1> /dev/null 2>& 1
+epprd_rg:cl_swap_IP_address[amlog_trace:319] cltime
+epprd_rg:cl_swap_IP_address[amlog_trace:319] DATE=2023-01-28T17:10:34.904230
+epprd_rg:cl_swap_IP_address[amlog_trace:320] echo '|2023-01-28T17:10:34.904230|INFO: Aliasing Service IP|61.81.244.156'
+epprd_rg:cl_swap_IP_address[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
+epprd_rg:cl_swap_IP_address[701] [[ 0 != 0 ]]
+epprd_rg:cl_swap_IP_address[714] flush_arp
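flush_arp, traced next, empties the ARP cache so every cached entry is re-resolved now that 61.81.244.156 answers on en0; the long run of arp -d calls below is one loop iteration per cached entry. Its shape:

    function flush_arp
    {
        # arp -an prints "? (61.81.244.27) at ..."; tr strips the parens,
        # so read yields host="?" and addr="61.81.244.27"
        arp -an | grep '\?' | tr -d '()' | while read host addr other
        do
            arp -d $addr               # prints "<addr> (<addr>) deleted"
        done
        return 0
    }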
+epprd_rg:cl_swap_IP_address[flush_arp:49] arp -an
+epprd_rg:cl_swap_IP_address[flush_arp:49] grep '\?'
+epprd_rg:cl_swap_IP_address[flush_arp:49] tr -d '()'
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.27
61.81.244.27 (61.81.244.27) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.30
61.81.244.30 (61.81.244.30) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.31
61.81.244.31 (61.81.244.31) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.33
61.81.244.33 (61.81.244.33) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.35
61.81.244.35 (61.81.244.35) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.40
61.81.244.40 (61.81.244.40) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.41
61.81.244.41 (61.81.244.41) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.191
61.81.244.191 (61.81.244.191) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.192
61.81.244.192 (61.81.244.192) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.193
61.81.244.193 (61.81.244.193) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.194
61.81.244.194 (61.81.244.194) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.201
61.81.244.201 (61.81.244.201) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.202
61.81.244.202 (61.81.244.202) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.53
61.81.244.53 (61.81.244.53) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.54
61.81.244.54 (61.81.244.54) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.204
61.81.244.204 (61.81.244.204) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.55
61.81.244.55 (61.81.244.55) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.56
61.81.244.56 (61.81.244.56) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.205
61.81.244.205 (61.81.244.205) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.57
61.81.244.57 (61.81.244.57) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.206
61.81.244.206 (61.81.244.206) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.58
61.81.244.58 (61.81.244.58) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.207
61.81.244.207 (61.81.244.207) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.59
61.81.244.59 (61.81.244.59) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.208
61.81.244.208 (61.81.244.208) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.60
61.81.244.60 (61.81.244.60) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.209
61.81.244.209 (61.81.244.209) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.210
61.81.244.210 (61.81.244.210) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.211
61.81.244.211 (61.81.244.211) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.212
61.81.244.212 (61.81.244.212) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.213
61.81.244.213 (61.81.244.213) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.215
61.81.244.215 (61.81.244.215) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.216
61.81.244.216 (61.81.244.216) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.217
61.81.244.217 (61.81.244.217) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.220
61.81.244.220 (61.81.244.220) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.221
61.81.244.221 (61.81.244.221) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.224
61.81.244.224 (61.81.244.224) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.227
61.81.244.227 (61.81.244.227) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.228
61.81.244.228 (61.81.244.228) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.229
61.81.244.229 (61.81.244.229) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.230
61.81.244.230 (61.81.244.230) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.231
61.81.244.231 (61.81.244.231) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.232
61.81.244.232 (61.81.244.232) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.233
61.81.244.233 (61.81.244.233) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.234
61.81.244.234 (61.81.244.234) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.236
61.81.244.236 (61.81.244.236) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.237
61.81.244.237 (61.81.244.237) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.239
61.81.244.239 (61.81.244.239) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.122
61.81.244.122 (61.81.244.122) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.123
61.81.244.123 (61.81.244.123) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.125
61.81.244.125 (61.81.244.125) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.127
61.81.244.127 (61.81.244.127) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.128
61.81.244.128 (61.81.244.128) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.129
61.81.244.129 (61.81.244.129) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.130
61.81.244.130 (61.81.244.130) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.131
61.81.244.131 (61.81.244.131) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.133
61.81.244.133 (61.81.244.133) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.135
61.81.244.135 (61.81.244.135) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.136
61.81.244.136 (61.81.244.136) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.138
61.81.244.138 (61.81.244.138) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.139
61.81.244.139 (61.81.244.139) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.141
61.81.244.141 (61.81.244.141) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.142
61.81.244.142 (61.81.244.142) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.143
61.81.244.143 (61.81.244.143) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.144
61.81.244.144 (61.81.244.144) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.146
61.81.244.146 (61.81.244.146) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.147
61.81.244.147 (61.81.244.147) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.149
61.81.244.149 (61.81.244.149) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.1
61.81.244.1 (61.81.244.1) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.151
61.81.244.151 (61.81.244.151) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.152
61.81.244.152 (61.81.244.152) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.153
61.81.244.153 (61.81.244.153) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.157
61.81.244.157 (61.81.244.157) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.158
61.81.244.158 (61.81.244.158) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.160
61.81.244.160 (61.81.244.160) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.161
61.81.244.161 (61.81.244.161) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.162
61.81.244.162 (61.81.244.162) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.163
61.81.244.163 (61.81.244.163) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.17
61.81.244.17 (61.81.244.17) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.19
61.81.244.19 (61.81.244.19) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:52] return 0
+epprd_rg:cl_swap_IP_address[716] netstat -in
Name  Mtu    Network     Address            Ipkts      Ierrs  Opkts      Oerrs  Coll
en0   1500   link#2      fa.e6.13.4e.a9.20  183694316  0      60659773   0      0
en0   1500   61.81.244   61.81.244.156      183694316  0      60659773   0      0
en0   1500   61.81.244   61.81.244.134      183694316  0      60659773   0      0
lo0   16896  link#1                         33645560   0      33645560   0      0
lo0   16896  127         127.0.0.1          33645560   0      33645560   0      0
lo0   16896  ::1%1                          33645560   0      33645560   0      0
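The interface listing confirms what the firstalias keyword bought: on en0 the service address 61.81.244.156 now appears ahead of the boot address 61.81.244.134, so outbound traffic sources from the service IP. A quick manual check of the same condition:

    # First inet address reported for en0 should now be the service address
    ifconfig en0 | awk '$1 == "inet" { print $2; exit }'    # expect 61.81.244.156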
+epprd_rg:cl_swap_IP_address[717] netstat -rnC
Routing tables
Destination      Gateway         Flags  Wt  Policy  If   Cost  Config_Cost

Route tree for Protocol Family 2 (Internet):
default          61.81.244.1     UG     1   -       en0  0     0
61.81.244.0      61.81.244.156   UHSb   1   -       en0  0     0 =>
61.81.244/24     61.81.244.156   U      1   -       en0  0     0
61.81.244.134    127.0.0.1       UGHS   1   -       lo0  0     0
61.81.244.156    127.0.0.1       UGHS   1   -       lo0  0     0
61.81.244.255    61.81.244.156   UHSb   1   -       en0  0     0
127/8            127.0.0.1       U      1   -       lo0  0     0

Route tree for Protocol Family 24 (Internet v6):
::1%1            ::1%1           UH     1   -       lo0  0     0
+epprd_rg:cl_swap_IP_address[989] no -o ipignoreredirects=0
Setting ipignoreredirects to 0
+epprd_rg:cl_swap_IP_address[992] cl_echo 32 'Completed execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0. Exit status = 0' /usr/es/sbin/cluster/events/utils/cl_swap_IP_address 'rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0' 0
Jan 28 2023 17:10:35Completed execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0. Exit status = 0
+epprd_rg:cl_swap_IP_address[994] date
Sat Jan 28 17:10:35 KORST 2023
+epprd_rg:cl_swap_IP_address[996] exit 0
+epprd_rg:acquire_service_addr[537] RC=0
+epprd_rg:acquire_service_addr[539] (( 0 != 0 ))
+epprd_rg:acquire_service_addr[549] [[ true == false ]]
+epprd_rg:acquire_service_addr[560] cl_RMupdate resource_up All_nonerror_service_addrs acquire_service_addr
2023-01-28T17:10:35.127628
2023-01-28T17:10:35.132044
+epprd_rg:acquire_service_addr[565] [[ UNDEFINED != UNDEFINED ]]
+epprd_rg:acquire_service_addr[568] NSORDER=''
+epprd_rg:acquire_service_addr[568] export NSORDER
+epprd_rg:acquire_service_addr[571] [[ true == false ]]
+epprd_rg:acquire_service_addr[579] exit 0
Jan 28 2023 17:10:35 EVENT COMPLETED: acquire_service_addr 0
|2023-01-28T17:10:35|28698|EVENT COMPLETED: acquire_service_addr 0|
+epprd_rg:process_resources[acquire_service_labels:3087] RC=0
+epprd_rg:process_resources[acquire_service_labels:3089] (( 0 != 0 && 0 != 11 ))
+epprd_rg:process_resources[acquire_service_labels:3104] (( 0 != 0 ))
+epprd_rg:process_resources[acquire_service_labels:3110] refresh -s clcomd
0513-095 The request for subsystem refresh was completed successfully.
+epprd_rg:process_resources[acquire_service_labels:3112] return 0
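Note the NSORDER bracketing inside acquire_service_addr: lookups ran with NSORDER=local (resolve from /etc/hosts only) while the address moved, and the variable is cleared on the way out so normal resolution order resumes; cluster events avoid depending on remote name services while service IPs are mid-move. The pattern, sketched:

    saveNSORDER=${NSORDER:-UNDEFINED}     # remember the caller's setting
    NSORDER=local; export NSORDER         # /etc/hosts only during the swap
    # ... acquire the service address ...
    if [[ $saveNSORDER != UNDEFINED ]]
    then NSORDER=$saveNSORDER
    else NSORDER=''
    fi
    export NSORDER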
+epprd_rg:process_resources[3324] true
+epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next
+epprd_rg:process_resources[3328] set -a
+epprd_rg:process_resources[3329] clRGPA
+epprd_rg:clRGPA[+47] [[ high = high ]]
+epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$
+epprd_rg:clRGPA[+49] usingVer=clrgpa
+epprd_rg:clRGPA[+54] clrgpa
2023-01-28T17:10:35.207722 clrgpa
+epprd_rg:clRGPA[+55] exit 0
+epprd_rg:process_resources[3329] eval JOB_TYPE=DISKS ACTION=ACQUIRE HDISKS='"hdisk2,hdisk3,hdisk4,hdisk5,hdisk6,hdisk7,hdisk8"' RESOURCE_GROUPS='"epprd_rg' '"' VOLUME_GROUPS='"datavg,datavg,datavg,datavg,datavg,datavg,datavg"'
+epprd_rg:process_resources[1] JOB_TYPE=DISKS
+epprd_rg:process_resources[1] ACTION=ACQUIRE
+epprd_rg:process_resources[1] HDISKS=hdisk2,hdisk3,hdisk4,hdisk5,hdisk6,hdisk7,hdisk8
+epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg '
+epprd_rg:process_resources[1] VOLUME_GROUPS=datavg,datavg,datavg,datavg,datavg,datavg,datavg
+epprd_rg:process_resources[3330] RC=0
+epprd_rg:process_resources[3331] set +a
+epprd_rg:process_resources[3333] (( 0 != 0 ))
+epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg
+epprd_rg:process_resources[3343] GROUPNAME=epprd_rg
+epprd_rg:process_resources[3343] export GROUPNAME
+epprd_rg:process_resources[3353] IS_SERVICE_START=1
+epprd_rg:process_resources[3354] IS_SERVICE_STOP=1
+epprd_rg:process_resources[3360] [[ DISKS == RELEASE ]]
+epprd_rg:process_resources[3360] [[ DISKS == ONLINE ]]
+epprd_rg:process_resources[3439] [[ ACQUIRE == ACQUIRE ]]
+epprd_rg:process_resources[3441] FAILED_RR_RGS=''
+epprd_rg:process_resources[3442] get_disks_main
+epprd_rg:process_resources[get_disks_main:981] PS4_FUNC=get_disks_main
+epprd_rg:process_resources[get_disks_main:981] typeset PS4_FUNC
+epprd_rg:process_resources[get_disks_main:982] [[ high == high ]]
+epprd_rg:process_resources[get_disks_main:982] set -x
+epprd_rg:process_resources[get_disks_main:983] SKIPBRKRES=0
+epprd_rg:process_resources[get_disks_main:983] typeset -li SKIPBRKRES
+epprd_rg:process_resources[get_disks_main:984] STAT=0
+epprd_rg:process_resources[get_disks_main:985] FAILURE_IN_METHOD=0
+epprd_rg:process_resources[get_disks_main:985] typeset -li FAILURE_IN_METHOD
+epprd_rg:process_resources[get_disks_main:986] LIST_OF_FAILED_RGS=''
+epprd_rg:process_resources[get_disks_main:989] : Below are the list of resources as generated by clrgpa
+epprd_rg:process_resources[get_disks_main:991] RG_LIST=epprd_rg
+epprd_rg:process_resources[get_disks_main:992] RDISK_LIST=''
+epprd_rg:process_resources[get_disks_main:993] DISK_LIST=hdisk2,hdisk3,hdisk4,hdisk5,hdisk6,hdisk7,hdisk8
+epprd_rg:process_resources[get_disks_main:994] VG_LIST=datavg,datavg,datavg,datavg,datavg,datavg,datavg
+epprd_rg:process_resources[get_disks_main:997] : Resource groups are processed individually. This is required because
+epprd_rg:process_resources[get_disks_main:998] : the replication mechanism may differ between resource groups.
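getReplicatedResources, traced next, answers "does this group carry replicated resources?" purely from ODM queries via clodmget, PowerHA's query wrapper over ODM classes such as HACMPrresmethods and HACMPresource. Condensed to its decision logic:

    RV=false
    # Any replicated-resource methods defined at all?
    if [[ -n $(clodmget -n -f type HACMPrresmethods) ]]
    then
        # ...and does epprd_rg reference one (values named *_REP_RESOURCE)?
        if [[ -n $(clodmget -q "name like '*_REP_RESOURCE' AND group=epprd_rg" \
                            -f value -n HACMPresource) ]]
        then
            RV=true
        fi
    fi
    echo $RV        # the caller stores this as REPLICATED_RESOURCES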
+epprd_rg:process_resources[get_disks_main:1002] getReplicatedResources epprd_rg
+epprd_rg:process_resources[getReplicatedResources:699] PS4_FUNC=getReplicatedResources
+epprd_rg:process_resources[getReplicatedResources:699] typeset PS4_FUNC
+epprd_rg:process_resources[getReplicatedResources:700] [[ high == high ]]
+epprd_rg:process_resources[getReplicatedResources:700] set -x
+epprd_rg:process_resources[getReplicatedResources:702] RV=false
+epprd_rg:process_resources[getReplicatedResources:704] clodmget -n -f type HACMPrresmethods
+epprd_rg:process_resources[getReplicatedResources:704] [[ -n 9 ]]
+epprd_rg:process_resources[getReplicatedResources:707] : Replicated resource methods are defined, check for resources
+epprd_rg:process_resources[getReplicatedResources:709] clodmget -q $'name like \'*_REP_RESOURCE\' AND group=epprd_rg' -f value -n HACMPresource
+epprd_rg:process_resources[getReplicatedResources:709] [[ -n '' ]]
+epprd_rg:process_resources[getReplicatedResources:718] : Verify if any backup profiles are configured and trigger cbm utilities based on that
+epprd_rg:process_resources[getReplicatedResources:720] clodmget -q name=BACKUP_ENABLED -f value HACMPresource
+epprd_rg:process_resources[getReplicatedResources:720] [[ -n '' ]]
+epprd_rg:process_resources[getReplicatedResources:739] echo false
+epprd_rg:process_resources[get_disks_main:1002] REPLICATED_RESOURCES=false
+epprd_rg:process_resources[get_disks_main:1005] : Break out the resources for resource group epprd_rg
+epprd_rg:process_resources[get_disks_main:1007] RESOURCE_GROUPS=epprd_rg
+epprd_rg:process_resources[get_disks_main:1008] VOLUME_GROUPS=''
+epprd_rg:process_resources[get_disks_main:1009] HDISKS=''
+epprd_rg:process_resources[get_disks_main:1010] RHDISKS=''
+epprd_rg:process_resources[get_disks_main:1011] RDISK_LIST=''
+epprd_rg:process_resources[get_disks_main:1014] : Get the volume groups in resource group epprd_rg
+epprd_rg:process_resources[get_disks_main:1016] print datavg,datavg,datavg,datavg,datavg,datavg,datavg
+epprd_rg:process_resources[get_disks_main:1016] read VOLUME_GROUPS VG_LIST
+epprd_rg:process_resources[get_disks_main:1016] IFS=:
+epprd_rg:process_resources[get_disks_main:1018] : Removing duplicate entries in VG list.
+epprd_rg:process_resources[get_disks_main:1020] echo datavg,datavg,datavg,datavg,datavg,datavg,datavg
+epprd_rg:process_resources[get_disks_main:1020] tr , '\n'
+epprd_rg:process_resources[get_disks_main:1020] xargs
+epprd_rg:process_resources[get_disks_main:1020] sort -u
+epprd_rg:process_resources[get_disks_main:1020] VOLUME_GROUPS=datavg
+epprd_rg:process_resources[get_disks_main:1022] : Get the disks corresponding to these volume groups
+epprd_rg:process_resources[get_disks_main:1024] print hdisk2,hdisk3,hdisk4,hdisk5,hdisk6,hdisk7,hdisk8
+epprd_rg:process_resources[get_disks_main:1024] read HDISKS DISK_LIST
+epprd_rg:process_resources[get_disks_main:1024] IFS=:
+epprd_rg:process_resources[get_disks_main:1025] HDISKS='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8'
+epprd_rg:process_resources[get_disks_main:1031] : Pick up any raw disks not returned by clrgpa
+epprd_rg:process_resources[get_disks_main:1033] clodmget -q group='epprd_rg AND name=RAW_DISK' HACMPresource
+epprd_rg:process_resources[get_disks_main:1033] [[ -n '' ]]
+epprd_rg:process_resources[get_disks_main:1042] : Get any raw disks in resource group epprd_rg
+epprd_rg:process_resources[get_disks_main:1045] print
+epprd_rg:process_resources[get_disks_main:1045] read RHDISKS RDISK_LIST
+epprd_rg:process_resources[get_disks_main:1045] IFS=:
+epprd_rg:process_resources[get_disks_main:1046] RHDISKS=''
+epprd_rg:process_resources[get_disks_main:1047] print datavg
+epprd_rg:process_resources[get_disks_main:1047] read VOLUME_GROUPS
+epprd_rg:process_resources[get_disks_main:1051] : At this point, the global variables below should be set to
+epprd_rg:process_resources[get_disks_main:1052] : the values associated with resource group epprd_rg
+epprd_rg:process_resources[get_disks_main:1054] export RESOURCE_GROUPS
+epprd_rg:process_resources[get_disks_main:1055] export VOLUME_GROUPS
+epprd_rg:process_resources[get_disks_main:1056] export HDISKS
+epprd_rg:process_resources[get_disks_main:1057] export RHDISKS
+epprd_rg:process_resources[get_disks_main:1059] [[ false == true ]]
+epprd_rg:process_resources[get_disks_main:1182] get_disks
+epprd_rg:process_resources[get_disks:1198] PS4_FUNC=get_disks
+epprd_rg:process_resources[get_disks:1198] typeset PS4_FUNC
+epprd_rg:process_resources[get_disks:1199] [[ high == high ]]
+epprd_rg:process_resources[get_disks:1199] set -x
+epprd_rg:process_resources[get_disks:1201] STAT=0
+epprd_rg:process_resources[get_disks:1204] : Most volume groups are Enhanced Concurrent Mode, and it should
+epprd_rg:process_resources[get_disks:1205] : not be necessary to break reserves. If all the volume groups
+epprd_rg:process_resources[get_disks:1206] : are ECM, we should be able to skip breaking reserves. If it
+epprd_rg:process_resources[get_disks:1207] : turns out that there is a reserve on a disk in an ECM volume
+epprd_rg:process_resources[get_disks:1208] : group, that will be handled by cl_pvo making an explicit call
+epprd_rg:process_resources[get_disks:1209] : to cl_disk_available.
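The ECM test that follows reads each volume group's conc_capable attribute straight from the CuAt ODM class; only if every VG is Enhanced Concurrent Mode can reserve-breaking be skipped. A sketch of the check:

    all_ecm=TRUE
    for vg in $(echo $VG_LIST | tr ',' '\n' | sort -u)
    do
        # conc_capable=y in CuAt marks an Enhanced Concurrent Mode VG
        [[ $(clodmget -q "name = $vg and attribute = conc_capable" \
                      -f value -n CuAt) != y ]] && all_ecm=FALSE
    done
    [[ $all_ecm == TRUE ]] && return 0     # no reserves to break, skip that path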
+epprd_rg:process_resources[get_disks:1213] all_ecm=TRUE
+epprd_rg:process_resources[get_disks:1214] IFS=:
+epprd_rg:process_resources[get_disks:1214] set -- datavg
+epprd_rg:process_resources[get_disks:1214] print datavg
+epprd_rg:process_resources[get_disks:1216] print datavg
+epprd_rg:process_resources[get_disks:1216] sort -u
+epprd_rg:process_resources[get_disks:1216] tr , '\n'
+epprd_rg:process_resources[get_disks:1218] clodmget -q 'name = datavg and attribute = conc_capable' -f value -n CuAt
+epprd_rg:process_resources[get_disks:1218] [[ y != y ]]
+epprd_rg:process_resources[get_disks:1224] [[ TRUE == FALSE ]]
+epprd_rg:process_resources[get_disks:1226] [[ TRUE == TRUE ]]
+epprd_rg:process_resources[get_disks:1226] return 0
+epprd_rg:process_resources[get_disks_main:1183] STAT=0
+epprd_rg:process_resources[get_disks_main:1186] return 0
+epprd_rg:process_resources[3443] tr ' ' '\n'
+epprd_rg:process_resources[3443] echo
+epprd_rg:process_resources[3443] FAILED_RR_RGS=''
+epprd_rg:process_resources[3444] [[ -n '' ]]
+epprd_rg:process_resources[3450] clodmget -n -q policy=scsi -f value HACMPsplitmerge
+epprd_rg:process_resources[3450] SCSIPR_ENABLED=''
+epprd_rg:process_resources[3450] typeset SCSIPR_ENABLED
+epprd_rg:process_resources[3451] [[ '' == Yes ]]
+epprd_rg:process_resources[3324] true
+epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next
+epprd_rg:process_resources[3328] set -a
+epprd_rg:process_resources[3329] clRGPA
+epprd_rg:clRGPA[+47] [[ high = high ]]
+epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$
+epprd_rg:clRGPA[+49] usingVer=clrgpa
+epprd_rg:clRGPA[+54] clrgpa
2023-01-28T17:10:35.284368 clrgpa
+epprd_rg:clRGPA[+55] exit 0
+epprd_rg:process_resources[3329] eval JOB_TYPE=VGS ACTION=ACQUIRE CONCURRENT_VOLUME_GROUP='""' VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' EXPORT_FILESYSTEM='""'
+epprd_rg:process_resources[1] JOB_TYPE=VGS
+epprd_rg:process_resources[1] ACTION=ACQUIRE
+epprd_rg:process_resources[1] CONCURRENT_VOLUME_GROUP=''
+epprd_rg:process_resources[1] VOLUME_GROUPS=datavg
+epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg '
+epprd_rg:process_resources[1] EXPORT_FILESYSTEM=''
+epprd_rg:process_resources[3330] RC=0
+epprd_rg:process_resources[3331] set +a
+epprd_rg:process_resources[3333] (( 0 != 0 ))
+epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg
+epprd_rg:process_resources[3343] GROUPNAME=epprd_rg
+epprd_rg:process_resources[3343] export GROUPNAME
+epprd_rg:process_resources[3353] IS_SERVICE_START=1
+epprd_rg:process_resources[3354] IS_SERVICE_STOP=1
+epprd_rg:process_resources[3360] [[ VGS == RELEASE ]]
+epprd_rg:process_resources[3360] [[ VGS == ONLINE ]]
+epprd_rg:process_resources[3571] process_volume_groups_main ACQUIRE
+epprd_rg:process_resources[process_volume_groups_main:2293] PS4_FUNC=process_volume_groups_main
+epprd_rg:process_resources[process_volume_groups_main:2293] typeset PS4_FUNC
+epprd_rg:process_resources[process_volume_groups_main:2294] [[ high == high ]]
+epprd_rg:process_resources[process_volume_groups_main:2294] set -x
+epprd_rg:process_resources[process_volume_groups_main:2295] DEF_VARYON_ACTION=0
+epprd_rg:process_resources[process_volume_groups_main:2295] typeset -li DEF_VARYON_ACTION
+epprd_rg:process_resources[process_volume_groups_main:2296] FAILURE_IN_METHOD=0
+epprd_rg:process_resources[process_volume_groups_main:2296] typeset -li FAILURE_IN_METHOD
+epprd_rg:process_resources[process_volume_groups_main:2297] ACTION=ACQUIRE
+epprd_rg:process_resources[process_volume_groups_main:2297] typeset ACTION
+epprd_rg:process_resources[process_volume_groups_main:2298] STAT=0
+epprd_rg:process_resources[process_volume_groups_main:2299] VG_LIST=datavg
+epprd_rg:process_resources[process_volume_groups_main:2300] RG_LIST=epprd_rg
+epprd_rg:process_resources[process_volume_groups_main:2304] getReplicatedResources epprd_rg
+epprd_rg:process_resources[getReplicatedResources:699] PS4_FUNC=getReplicatedResources
+epprd_rg:process_resources[getReplicatedResources:699] typeset PS4_FUNC
+epprd_rg:process_resources[getReplicatedResources:700] [[ high == high ]]
+epprd_rg:process_resources[getReplicatedResources:700] set -x
+epprd_rg:process_resources[getReplicatedResources:702] RV=false
+epprd_rg:process_resources[getReplicatedResources:704] clodmget -n -f type HACMPrresmethods
+epprd_rg:process_resources[getReplicatedResources:704] [[ -n 9 ]]
+epprd_rg:process_resources[getReplicatedResources:707] : Replicated resource methods are defined, check for resources
+epprd_rg:process_resources[getReplicatedResources:709] clodmget -q $'name like \'*_REP_RESOURCE\' AND group=epprd_rg' -f value -n HACMPresource
+epprd_rg:process_resources[getReplicatedResources:709] [[ -n '' ]]
+epprd_rg:process_resources[getReplicatedResources:718] : Verify if any backup profiles are configured and trigger cbm utilities based on that
+epprd_rg:process_resources[getReplicatedResources:720] clodmget -q name=BACKUP_ENABLED -f value HACMPresource
+epprd_rg:process_resources[getReplicatedResources:720] [[ -n '' ]]
+epprd_rg:process_resources[getReplicatedResources:739] echo false
+epprd_rg:process_resources[process_volume_groups_main:2304] REPLICATED_RESOURCES=false
+epprd_rg:process_resources[process_volume_groups_main:2305] RESOURCE_GROUPS=epprd_rg
+epprd_rg:process_resources[process_volume_groups_main:2306] print -- datavg
+epprd_rg:process_resources[process_volume_groups_main:2306] read VOLUME_GROUPS VG_LIST
+epprd_rg:process_resources[process_volume_groups_main:2306] IFS=:
+epprd_rg:process_resources[process_volume_groups_main:2307] VOLUME_GROUPS=datavg
+epprd_rg:process_resources[process_volume_groups_main:2310] : At this point, these variables contain information only for epprd_rg
+epprd_rg:process_resources[process_volume_groups_main:2312] export VOLUME_GROUPS
+epprd_rg:process_resources[process_volume_groups_main:2313] export RESOURCE_GROUPS
+epprd_rg:process_resources[process_volume_groups_main:2315] [[ false == true ]]
+epprd_rg:process_resources[process_volume_groups_main:2555] process_volume_groups ACQUIRE
+epprd_rg:process_resources[process_volume_groups:2571] PS4_FUNC=process_volume_groups
+epprd_rg:process_resources[process_volume_groups:2571] typeset PS4_FUNC
+epprd_rg:process_resources[process_volume_groups:2572] [[ high == high ]]
+epprd_rg:process_resources[process_volume_groups:2572] set -x
+epprd_rg:process_resources[process_volume_groups:2573] STAT=0
+epprd_rg:process_resources[process_volume_groups:2575] GROUPNAME=epprd_rg
+epprd_rg:process_resources[process_volume_groups:2575] export GROUPNAME
+epprd_rg:process_resources[process_volume_groups:2578] [[ ACQUIRE == ACQUIRE ]]
+epprd_rg:process_resources[process_volume_groups:2581] : Varyon the VGs in the environment
+epprd_rg:process_resources[process_volume_groups:2583] cl_activate_vgs -n
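cl_activate_vgs walks the group's volume groups, skips any that are already online, and brings the rest up through clvaryonvg, a wrapper around varyonvg; the -n it passes means stale partitions are not synchronized now (sync is deferred). The per-VG gate, condensed from the vgs_list/vgs_chk trace below:

    VGSTATUS=$(lsvg -L -o)                          # e.g. "caavg_private rootvg"
    if [[ $VGSTATUS != @(?(*\ )datavg?(\ *)) ]]     # not already varied on
    then
        clvaryonvg -n datavg || STATUS=1            # varyon, defer sync
    fi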
+epprd_rg:cl_activate_vgs[216] SYNCFLAG='' +epprd_rg:cl_activate_vgs[217] CLENV='' +epprd_rg:cl_activate_vgs[218] TMP_FILENAME=/tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs[219] USE_OEM_METHODS=false +epprd_rg:cl_activate_vgs[221] PROC_RES=false +epprd_rg:cl_activate_vgs[225] [[ VGS != 0 ]] +epprd_rg:cl_activate_vgs[225] [[ VGS != GROUP ]] +epprd_rg:cl_activate_vgs[226] PROC_RES=true +epprd_rg:cl_activate_vgs[232] [[ -n == -n ]] +epprd_rg:cl_activate_vgs[234] SYNCFLAG=-n +epprd_rg:cl_activate_vgs[235] shift +epprd_rg:cl_activate_vgs[240] (( 0 != 0 )) +epprd_rg:cl_activate_vgs[247] set -u +epprd_rg:cl_activate_vgs[250] rm -f /tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs[254] lsvg -L -o +epprd_rg:cl_activate_vgs[254] print caavg_private rootvg +epprd_rg:cl_activate_vgs[254] VGSTATUS='caavg_private rootvg' +epprd_rg:cl_activate_vgs[257] ALLVGS=All_volume_groups +epprd_rg:cl_activate_vgs[258] cl_RMupdate resource_acquiring All_volume_groups cl_activate_vgs 2023-01-28T17:10:35.356061 2023-01-28T17:10:35.360470 +epprd_rg:cl_activate_vgs[262] [[ true == false ]] +epprd_rg:cl_activate_vgs[285] LIST_OF_VOLUME_GROUPS_FOR_RG='' +epprd_rg:cl_activate_vgs[289] export GROUPNAME +epprd_rg:cl_activate_vgs[291] echo datavg +epprd_rg:cl_activate_vgs[291] read LIST_OF_VOLUME_GROUPS_FOR_RG VOLUME_GROUPS +epprd_rg:cl_activate_vgs[291] IFS=: +epprd_rg:cl_activate_vgs[294] echo datavg +epprd_rg:cl_activate_vgs[296] sort -u +epprd_rg:cl_activate_vgs[295] tr , '\n' +epprd_rg:cl_activate_vgs[294] LIST_OF_VOLUME_GROUPS_FOR_RG=datavg +epprd_rg:cl_activate_vgs[298] vgs_list datavg +epprd_rg:cl_activate_vgs[vgs_list:178] PS4_LOOP='' +epprd_rg:cl_activate_vgs[vgs_list:178] typeset PS4_LOOP +epprd_rg:cl_activate_vgs:datavg[vgs_list:182] PS4_LOOP=datavg +epprd_rg:cl_activate_vgs:datavg[vgs_list:186] [[ 'caavg_private rootvg' == @(?(*\ )datavg?(\ *)) ]] +epprd_rg:cl_activate_vgs:datavg[vgs_list:192] : call varyon for the volume group in Foreground +epprd_rg:cl_activate_vgs:datavg[vgs_list:194] vgs_chk datavg -n cl_activate_vgs +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:78] VG=datavg +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:78] typeset VG +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:79] SYNCFLAG=-n +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:79] typeset SYNCFLAG +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:80] PROGNAME=cl_activate_vgs +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:80] typeset PROGNAME +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:81] STATUS=0 +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:81] typeset -li STATUS +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:83] [[ -n '' ]] +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:100] amlog_trace '' 'Activating Volume Group|datavg' +epprd_rg:cl_activate_vgs(0.052):datavg[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_vgs(0.053):datavg[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_vgs(0.078):datavg[amlog_trace:319] cltime +epprd_rg:cl_activate_vgs(0.080):datavg[amlog_trace:319] DATE=2023-01-28T17:10:35.397679 +epprd_rg:cl_activate_vgs(0.080):datavg[amlog_trace:320] echo '|2023-01-28T17:10:35.397679|INFO: Activating Volume Group|datavg' +epprd_rg:cl_activate_vgs(0.080):datavg[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_vgs(0.080):datavg[vgs_chk:102] typeset -x ERRMSG +epprd_rg:cl_activate_vgs(0.080):datavg[vgs_chk:103] clvaryonvg -n datavg +epprd_rg:clvaryonvg(0.009):datavg[985] version=1.21.7.22 +epprd_rg:clvaryonvg(0.009):datavg[989] : Without this test, cause of 
failure due to non-root may not be obvious +epprd_rg:clvaryonvg(0.009):datavg[991] [[ -z '' ]] +epprd_rg:clvaryonvg(0.009):datavg[991] id -nu +epprd_rg:clvaryonvg(0.010):datavg[991] 2> /dev/null +epprd_rg:clvaryonvg(0.012):datavg[991] user_name=root +epprd_rg:clvaryonvg(0.012):datavg[994] : Check if RBAC is enabled +epprd_rg:clvaryonvg(0.012):datavg[996] is_rbac_enabled='' +epprd_rg:clvaryonvg(0.012):datavg[996] typeset is_rbac_enabled +epprd_rg:clvaryonvg(0.012):datavg[997] clodmget -nq group='LDAPClient and name=RBACConfig' -f value HACMPLDAP +epprd_rg:clvaryonvg(0.013):datavg[997] 2> /dev/null +epprd_rg:clvaryonvg(0.015):datavg[997] is_rbac_enabled='' +epprd_rg:clvaryonvg(0.016):datavg[999] role='' +epprd_rg:clvaryonvg(0.016):datavg[999] typeset role +epprd_rg:clvaryonvg(0.016):datavg[1000] [[ root != root ]] +epprd_rg:clvaryonvg(0.016):datavg[1009] LEAVEOFF=FALSE +epprd_rg:clvaryonvg(0.016):datavg[1010] FORCEON='' +epprd_rg:clvaryonvg(0.016):datavg[1011] FORCEUPD=FALSE +epprd_rg:clvaryonvg(0.016):datavg[1012] NOQUORUM=20 +epprd_rg:clvaryonvg(0.016):datavg[1013] MISSING_UPDATES=30 +epprd_rg:clvaryonvg(0.016):datavg[1014] DATA_DIVERGENCE=31 +epprd_rg:clvaryonvg(0.016):datavg[1015] ARGS='' +epprd_rg:clvaryonvg(0.016):datavg[1016] typeset -li varyonvg_rc +epprd_rg:clvaryonvg(0.016):datavg[1017] typeset -li MAXLVS +epprd_rg:clvaryonvg(0.016):datavg[1018] ENODEV=19 +epprd_rg:clvaryonvg(0.016):datavg[1018] typeset -li ENODEV +epprd_rg:clvaryonvg(0.016):datavg[1020] set -u +epprd_rg:clvaryonvg(0.016):datavg[1022] /bin/dspmsg -s 2 cspoc.cat 31 'usage: clvaryonvg [-F] [-f] [-n] [-p] [-s] [-o] \n' +epprd_rg:clvaryonvg(0.018):datavg[1022] USAGE='usage: clvaryonvg [-F] [-f] [-n] [-p] [-s] [-o] ' +epprd_rg:clvaryonvg(0.018):datavg[1023] (( 2 < 1 )) +epprd_rg:clvaryonvg(0.018):datavg[1029] : Parse the options +epprd_rg:clvaryonvg(0.018):datavg[1031] S_FLAG='' +epprd_rg:clvaryonvg(0.018):datavg[1032] P_FLAG='' +epprd_rg:clvaryonvg(0.018):datavg[1033] getopts :Ffnops option +epprd_rg:clvaryonvg(0.018):datavg[1038] : -n Always applied, retained for compatibility +epprd_rg:clvaryonvg(0.018):datavg[1033] getopts :Ffnops option +epprd_rg:clvaryonvg(0.018):datavg[1048] : Pick up the volume group name, which follows the options +epprd_rg:clvaryonvg(0.018):datavg[1050] shift 1 +epprd_rg:clvaryonvg(0.018):datavg[1051] VG=datavg +epprd_rg:clvaryonvg(0.018):datavg[1054] : Set up filenames we will be using +epprd_rg:clvaryonvg(0.018):datavg[1056] VGDIR=/usr/es/sbin/cluster/etc/vg/ +epprd_rg:clvaryonvg(0.018):datavg[1057] TSFILE=/usr/es/sbin/cluster/etc/vg/datavg.tstamp +epprd_rg:clvaryonvg(0.018):datavg[1058] DSFILE=/usr/es/sbin/cluster/etc/vg/datavg.desc +epprd_rg:clvaryonvg(0.018):datavg[1059] RPFILE=/usr/es/sbin/cluster/etc/vg/datavg.replay +epprd_rg:clvaryonvg(0.019):datavg[1060] permset=/usr/es/sbin/cluster/etc/vg/datavg.perms +epprd_rg:clvaryonvg(0.019):datavg[1061] failfile=/usr/es/sbin/cluster/etc/vg/datavg.fail +epprd_rg:clvaryonvg(0.019):datavg[1065] : Get some LVM information we are going to need in processing this +epprd_rg:clvaryonvg(0.019):datavg[1066] : volume group: +epprd_rg:clvaryonvg(0.019):datavg[1067] : - volume group identifier - vgid +epprd_rg:clvaryonvg(0.019):datavg[1068] : - list of disks +epprd_rg:clvaryonvg(0.019):datavg[1069] : - quorum indicator +epprd_rg:clvaryonvg(0.019):datavg[1070] : - timestamp if present +epprd_rg:clvaryonvg(0.019):datavg[1072] /usr/sbin/getlvodm -v datavg +epprd_rg:clvaryonvg(0.022):datavg[1072] VGID=00c44af100004b00000001851e9dc053 
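Note the amlog_trace call inside vgs_chk above: every resource acquisition is bracketed by a timestamped record appended to /var/hacmp/availability/clavailability.log, which makes that file a convenient place to measure how long each step of a takeover takes. For instance, to pull back out the volume group activation seen here:

    grep 'Activating Volume Group' /var/hacmp/availability/clavailability.log
    # |2023-01-28T17:10:35.397679|INFO: Activating Volume Group|datavg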
+epprd_rg:clvaryonvg(0.023):datavg[1073] cut '-d ' -f2 +epprd_rg:clvaryonvg(0.023):datavg[1073] /usr/sbin/getlvodm -w 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(0.027):datavg[1073] pvlst=$'hdisk2\nhdisk3\nhdisk4\nhdisk5\nhdisk6\nhdisk7\nhdisk8' +epprd_rg:clvaryonvg(0.027):datavg[1074] /usr/sbin/getlvodm -Q datavg +epprd_rg:clvaryonvg(0.030):datavg[1074] quorum=y +epprd_rg:clvaryonvg(0.030):datavg[1075] TS_FROM_DISK='' +epprd_rg:clvaryonvg(0.030):datavg[1076] TS_FROM_ODM='' +epprd_rg:clvaryonvg(0.030):datavg[1077] GOOD_PV='' +epprd_rg:clvaryonvg(0.030):datavg[1078] O_flag='' +epprd_rg:clvaryonvg(0.030):datavg[1079] A_flag='' +epprd_rg:clvaryonvg(0.030):datavg[1080] mode_flag='' +epprd_rg:clvaryonvg(0.030):datavg[1081] vg_on_mode='' +epprd_rg:clvaryonvg(0.030):datavg[1082] vg_set_passive=FALSE +epprd_rg:clvaryonvg(0.030):datavg[1084] odmget -q 'attribute = varyon_state' PdAt +epprd_rg:clvaryonvg(0.033):datavg[1084] [[ -n $'\nPdAt:\n\tuniquetype = "logical_volume/vgsubclass/vgtype"\n\tattribute = "varyon_state"\n\tdeflt = "0"\n\tvalues = "0,1,2,3"\n\twidth = ""\n\ttype = "R"\n\tgeneric = ""\n\trep = "l"\n\tnls_index = 0' ]] +epprd_rg:clvaryonvg(0.033):datavg[1087] : LVM may record that a volume group was varied on from an earlier +epprd_rg:clvaryonvg(0.033):datavg[1088] : IPL. Rely on HA state tracking, and override the LVM check +epprd_rg:clvaryonvg(0.033):datavg[1090] O_flag=-O +epprd_rg:clvaryonvg(0.033):datavg[1093] : Checking if SCSI PR is enabled and it is so, +epprd_rg:clvaryonvg(0.033):datavg[1094] : confirming if the SCSI PR reservations are intact. +epprd_rg:clvaryonvg(0.034):datavg[1096] lssrc -ls clstrmgrES +epprd_rg:clvaryonvg(0.034):datavg[1096] 2>& 1 +epprd_rg:clvaryonvg(0.035):datavg[1096] grep 'Current state:' +epprd_rg:clvaryonvg(0.037):datavg[1096] egrep -q -v 'ST_INIT|NOT_CONFIGURED' +epprd_rg:clvaryonvg(0.050):datavg[1098] clodmget -n -q policy=scsi -f value HACMPsplitmerge +epprd_rg:clvaryonvg(0.053):datavg[1098] SCSIPR_ENABLED='' +epprd_rg:clvaryonvg(0.053):datavg[1098] typeset SCSIPR_ENABLED +epprd_rg:clvaryonvg(0.053):datavg[1099] clodmget -q $'name like \'*VOLUME_GROUP\' and value = datavg' -f group -n HACMPresource +epprd_rg:clvaryonvg(0.056):datavg[1099] resgrp=epprd_rg +epprd_rg:clvaryonvg(0.056):datavg[1099] typeset resgrp +epprd_rg:clvaryonvg(0.056):datavg[1100] [[ '' == Yes ]] +epprd_rg:clvaryonvg(0.056):datavg[1134] : Operations such as varying on the volume group are likely to +epprd_rg:clvaryonvg(0.056):datavg[1135] : require read/write access. So, set any volume group fencing appropriately. +epprd_rg:clvaryonvg(0.056):datavg[1137] cl_set_vg_fence_height -c datavg rw +epprd_rg:clvaryonvg(0.060):datavg[1138] RC=0 +epprd_rg:clvaryonvg(0.060):datavg[1139] (( 19 == 0 )) +epprd_rg:clvaryonvg(0.060):datavg[1147] : Return code from volume group fencing for datavg is 0 +epprd_rg:clvaryonvg(0.060):datavg[1148] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.060):datavg[1160] : Check on the current state of the volume group +epprd_rg:clvaryonvg(0.062):datavg[1182] grep -x -q datavg +epprd_rg:clvaryonvg(0.062):datavg[1182] lsvg -L +epprd_rg:clvaryonvg(0.065):datavg[1184] : The volume group is known - check to see if its already varyd on. 
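At this point clvaryonvg has collected everything it needs from the ODM: the VGID, the member disks (hdisk2 through hdisk8), and the quorum setting, and it has raised the disk fence for datavg to read/write so the varyon cannot be blocked by a read-only fence. Those lookups can be reproduced by hand, assuming root on the same node:

    VG=datavg
    VGID=$(/usr/sbin/getlvodm -v $VG)            # 00c44af100004b00000001851e9dc053
    /usr/sbin/getlvodm -w $VGID | cut -d' ' -f2  # member PVs: hdisk2 ... hdisk8
    /usr/sbin/getlvodm -Q $VG                    # quorum flag: y
    cl_set_vg_fence_height -c $VG rw             # rc 0 = fence set to read/write;
                                                 # rc 19 (ENODEV) would typically mean
                                                 # no fence group exists for this VG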
+epprd_rg:clvaryonvg(0.066):datavg[1186] grep -x -q datavg +epprd_rg:clvaryonvg(0.066):datavg[1186] lsvg -L -o +epprd_rg:clvaryonvg(0.070):datavg[1190] lsvg -L datavg +epprd_rg:clvaryonvg(0.070):datavg[1190] 2> /dev/null +epprd_rg:clvaryonvg(0.070):datavg[1190] grep -q -i -w passive-only +epprd_rg:clvaryonvg(0.112):datavg[1191] vg_on_mode=passive +epprd_rg:clvaryonvg(0.114):datavg[1194] grep -iw removed +epprd_rg:clvaryonvg(0.114):datavg[1194] lsvg -p datavg +epprd_rg:clvaryonvg(0.114):datavg[1194] 2> /dev/null +epprd_rg:clvaryonvg(0.134):datavg[1194] removed_disks='' +epprd_rg:clvaryonvg(0.134):datavg[1195] [[ -n '' ]] +epprd_rg:clvaryonvg(0.134):datavg[1213] [[ -n passive ]] +epprd_rg:clvaryonvg(0.134):datavg[1215] lqueryvg -g 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(0.135):datavg[1215] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.154):datavg[1321] : +epprd_rg:clvaryonvg(0.154):datavg[1322] : First, sniff at the disk to see if the local ODM information +epprd_rg:clvaryonvg(0.154):datavg[1323] : matches what is on the disk. +epprd_rg:clvaryonvg(0.154):datavg[1324] : +epprd_rg:clvaryonvg(0.154):datavg[1326] vgdatimestamps +epprd_rg:clvaryonvg(0.154):datavg[vgdatimestamps:201] PS4_FUNC=vgdatimestamps +epprd_rg:clvaryonvg(0.154):datavg[vgdatimestamps:201] typeset PS4_FUNC +epprd_rg:clvaryonvg(0.154):datavg[vgdatimestamps:202] [[ high == high ]] +epprd_rg:clvaryonvg(0.154):datavg[vgdatimestamps:202] set -x +epprd_rg:clvaryonvg(0.154):datavg[vgdatimestamps:203] set -u +epprd_rg:clvaryonvg(0.154):datavg[vgdatimestamps:206] : See what timestamp LVM has recorded from the last time it checked +epprd_rg:clvaryonvg(0.154):datavg[vgdatimestamps:207] : the disks +epprd_rg:clvaryonvg(0.154):datavg[vgdatimestamps:209] /usr/sbin/getlvodm -T 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(0.155):datavg[vgdatimestamps:209] 2> /dev/null +epprd_rg:clvaryonvg(0.158):datavg[vgdatimestamps:209] TS_FROM_ODM=63d4d463310b8939 +epprd_rg:clvaryonvg(0.158):datavg[vgdatimestamps:212] : Check to see if HACMP is maintaining a timestamp for this volume group +epprd_rg:clvaryonvg(0.158):datavg[vgdatimestamps:213] : Needed for some older volume groups +epprd_rg:clvaryonvg(0.158):datavg[vgdatimestamps:215] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.tstamp ]] +epprd_rg:clvaryonvg(0.158):datavg[vgdatimestamps:234] : Get the time stamp from the actual disk +epprd_rg:clvaryonvg(0.158):datavg[vgdatimestamps:236] clvgdats /dev/datavg +epprd_rg:clvaryonvg(0.159):datavg[vgdatimestamps:236] 2> /dev/null +epprd_rg:clvaryonvg(0.168):datavg[vgdatimestamps:236] TS_FROM_DISK=63d4d463310b8939 +epprd_rg:clvaryonvg(0.168):datavg[vgdatimestamps:237] clvgdats_rc=0 +epprd_rg:clvaryonvg(0.168):datavg[vgdatimestamps:238] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.168):datavg[vgdatimestamps:247] [[ -z 63d4d463310b8939 ]] +epprd_rg:clvaryonvg(0.168):datavg[1328] [[ 63d4d463310b8939 != 63d4d463310b8939 ]] +epprd_rg:clvaryonvg(0.168):datavg[1344] : There is a chance that a VG that should be in passive mode is not. +epprd_rg:clvaryonvg(0.168):datavg[1345] : Run cl_pvo to put it in passive mode if possible. +epprd_rg:clvaryonvg(0.168):datavg[1350] [[ -z passive ]] +epprd_rg:clvaryonvg(0.168):datavg[1350] [[ passive == ordinary ]] +epprd_rg:clvaryonvg(0.168):datavg[1350] [[ passive == passive ]] +epprd_rg:clvaryonvg(0.168):datavg[1350] [[ -n '' ]] +epprd_rg:clvaryonvg(0.168):datavg[1381] : Let us assume that the old style synclvodm would sync all the PV/FS changes. 
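Two things were just established: datavg is already varied on in passive mode (vg_on_mode=passive, left over from node-up processing), and the VGDA timestamp recorded in the local ODM matches the one actually on disk, 63d4d463310b8939 on both sides, so the local copy of the VG definition is current and no exportvg/importvg resync is needed. The timestamp test reduces to:

    VGID=$(/usr/sbin/getlvodm -v datavg)
    TS_FROM_ODM=$(/usr/sbin/getlvodm -T $VGID 2>/dev/null)  # what LVM last recorded
    TS_FROM_DISK=$(clvgdats /dev/datavg 2>/dev/null)        # what the VGDA says now
    if [[ $TS_FROM_ODM != $TS_FROM_DISK ]]; then
        : # stale ODM - clvaryonvg would have to resynchronize the definition
    fi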
+epprd_rg:clvaryonvg(0.168):datavg[1383] expimpvg_notrequired=1 +epprd_rg:clvaryonvg(0.168):datavg[1386] : Optimistically give varyonvg a try. +epprd_rg:clvaryonvg(0.168):datavg[1388] [[ passive == passive ]] +epprd_rg:clvaryonvg(0.168):datavg[1391] : If the volume group was varyd on in passive mode when this node came +epprd_rg:clvaryonvg(0.168):datavg[1392] : up, flip it over to active mode. Following logic will then fall +epprd_rg:clvaryonvg(0.168):datavg[1393] : through to updatefs. +epprd_rg:clvaryonvg(0.168):datavg[1395] [[ passive == passive ]] +epprd_rg:clvaryonvg(0.168):datavg[1395] A_flag=-A +epprd_rg:clvaryonvg(0.168):datavg[1396] varyonvg -n -c -A -O datavg +epprd_rg:clvaryonvg(0.169):datavg[1396] 2>& 1 +epprd_rg:clvaryonvg(0.388):datavg[1396] varyonvg_output='' +epprd_rg:clvaryonvg(0.388):datavg[1397] varyonvg_rc=0 +epprd_rg:clvaryonvg(0.388):datavg[1397] typeset -li varyonvg_rc +epprd_rg:clvaryonvg(0.388):datavg[1399] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.388):datavg[1481] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.388):datavg[1576] : At this point, datavg should be varied on +epprd_rg:clvaryonvg(0.389):datavg[1578] [[ FALSE == TRUE ]] +epprd_rg:clvaryonvg(0.389):datavg[1585] [[ -z 63d4d463310b8939 ]] +epprd_rg:clvaryonvg(0.389):datavg[1592] vgdatimestamps +epprd_rg:clvaryonvg(0.389):datavg[vgdatimestamps:201] PS4_FUNC=vgdatimestamps +epprd_rg:clvaryonvg(0.389):datavg[vgdatimestamps:201] typeset PS4_FUNC +epprd_rg:clvaryonvg(0.389):datavg[vgdatimestamps:202] [[ high == high ]] +epprd_rg:clvaryonvg(0.389):datavg[vgdatimestamps:202] set -x +epprd_rg:clvaryonvg(0.389):datavg[vgdatimestamps:203] set -u +epprd_rg:clvaryonvg(0.389):datavg[vgdatimestamps:206] : See what timestamp LVM has recorded from the last time it checked +epprd_rg:clvaryonvg(0.389):datavg[vgdatimestamps:207] : the disks +epprd_rg:clvaryonvg(0.389):datavg[vgdatimestamps:209] /usr/sbin/getlvodm -T 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(0.390):datavg[vgdatimestamps:209] 2> /dev/null +epprd_rg:clvaryonvg(0.392):datavg[vgdatimestamps:209] TS_FROM_ODM=63d4d87b2421bec0 +epprd_rg:clvaryonvg(0.392):datavg[vgdatimestamps:212] : Check to see if HACMP is maintaining a timestamp for this volume group +epprd_rg:clvaryonvg(0.392):datavg[vgdatimestamps:213] : Needed for some older volume groups +epprd_rg:clvaryonvg(0.392):datavg[vgdatimestamps:215] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.tstamp ]] +epprd_rg:clvaryonvg(0.392):datavg[vgdatimestamps:234] : Get the time stamp from the actual disk +epprd_rg:clvaryonvg(0.392):datavg[vgdatimestamps:236] clvgdats /dev/datavg +epprd_rg:clvaryonvg(0.393):datavg[vgdatimestamps:236] 2> /dev/null +epprd_rg:clvaryonvg(0.403):datavg[vgdatimestamps:236] TS_FROM_DISK=63d4d87b2421bec0 +epprd_rg:clvaryonvg(0.403):datavg[vgdatimestamps:237] clvgdats_rc=0 +epprd_rg:clvaryonvg(0.403):datavg[vgdatimestamps:238] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.403):datavg[vgdatimestamps:247] [[ -z 63d4d87b2421bec0 ]] +epprd_rg:clvaryonvg(0.403):datavg[1600] [[ 63d4d87b2421bec0 != 63d4d87b2421bec0 ]] +epprd_rg:clvaryonvg(0.403):datavg[1622] [[ FALSE == TRUE ]] +epprd_rg:clvaryonvg(0.403):datavg[1633] : Even if everything looks OK, update the local file system +epprd_rg:clvaryonvg(0.403):datavg[1634] : definitions, since changes there do not show up in the +epprd_rg:clvaryonvg(0.403):datavg[1635] : VGDA timestamps +epprd_rg:clvaryonvg(0.403):datavg[1637] updatefs datavg +epprd_rg:clvaryonvg(0.403):datavg[updatefs:506] PS4_FUNC=updatefs +epprd_rg:clvaryonvg(0.403):datavg[updatefs:506] typeset PS4_FUNC 
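Because the group is in passive mode, clvaryonvg promotes it to active with 'varyonvg -n -c -A -O datavg' (-A for active mode, -O to override LVM's stale varyon-state record) rather than performing a fresh varyon; the command returns 0, and a second vgdatimestamps pass confirms that ODM and disk still agree on the new stamp 63d4d87b2421bec0. updatefs then runs unconditionally, because /etc/filesystems edits made while another node owned the group never show up in the VGDA timestamps. The long trace that follows is updatefs walking all fifteen logical volumes in datavg; as a rough sketch of each per-LV pass before a single imfs rebuilds everything:

    for lv in $(/usr/sbin/getlvodm -L datavg | cut -f1 -d' '); do
        fs_info=$(LC_ALL=C /usr/sbin/getlvcb -f $lv)   # vfs/log/account from the LVCB
        [[ $fs_info == *([[:space:]]) ]] && continue   # raw LV or empty LVCB: skip
        log_lv=$(echo $fs_info | sed -n 's/.*log=\([^:]*\).*/\1/p')
        /usr/sbin/getlvcb -t ${log_lv##*/} >/dev/null 2>&1 ||
            continue                                   # log LV's LVCB must be readable too
        /usr/sbin/imfs -lx $lv                         # drop the stale /etc/filesystems stanza
    done
    /usr/sbin/imfs datavg                              # regenerate stanzas from the LVCBs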
+epprd_rg:clvaryonvg(0.403):datavg[updatefs:507] [[ high == high ]] +epprd_rg:clvaryonvg(0.403):datavg[updatefs:507] set -x +epprd_rg:clvaryonvg(0.403):datavg[updatefs:508] do_imfs='' +epprd_rg:clvaryonvg(0.403):datavg[updatefs:508] typeset do_imfs +epprd_rg:clvaryonvg(0.403):datavg[updatefs:509] has_typed_lvs='' +epprd_rg:clvaryonvg(0.403):datavg[updatefs:509] typeset has_typed_lvs +epprd_rg:clvaryonvg(0.403):datavg[updatefs:512] : Delete existing filesystem information for this volume group. This is +epprd_rg:clvaryonvg(0.403):datavg[updatefs:513] : needed because imfs will not update an existing /etc/filesystems entry. +epprd_rg:clvaryonvg(0.405):datavg[updatefs:515] cut -f1 '-d ' +epprd_rg:clvaryonvg(0.405):datavg[updatefs:515] /usr/sbin/getlvodm -L datavg +epprd_rg:clvaryonvg(0.409):datavg[updatefs:515] lv_list=$'saplv\nsapmntlv\noraclelv\nepplv\noraarchlv\nsapdata1lv\nsapdata2lv\nsapdata3lv\nsapdata4lv\nboardlv\noriglogAlv\noriglogBlv\nmirrlogAlv\nmirrlogBlv\nepprdaloglv' +epprd_rg:clvaryonvg(0.409):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.409):datavg[updatefs:521] clodmget -q 'name = saplv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.412):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.412):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.412):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.412):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(0.412):datavg[updatefs:530] /usr/sbin/getlvcb -f saplv +epprd_rg:clvaryonvg(0.413):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.431):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(0.431):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(0.431):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.433):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.433):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false +epprd_rg:clvaryonvg(0.436):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.436):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.436):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.436):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.438):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.457):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.457):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.457):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.457):datavg[updatefs:538] : 3. 
Its logs LVCB is readable +epprd_rg:clvaryonvg(0.459):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(0.459):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.462):datavg[updatefs:545] /usr/sbin/imfs -lx saplv +epprd_rg:clvaryonvg(0.466):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.466):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.466):datavg[updatefs:521] clodmget -q 'name = sapmntlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.469):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.469):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.469):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.469):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(0.469):datavg[updatefs:530] /usr/sbin/getlvcb -f sapmntlv +epprd_rg:clvaryonvg(0.470):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.488):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(0.488):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(0.488):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.490):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.490):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false +epprd_rg:clvaryonvg(0.494):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.494):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.494):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.494):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.495):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.513):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.513):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.513):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.513):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.514):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(0.514):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.518):datavg[updatefs:545] /usr/sbin/imfs -lx sapmntlv +epprd_rg:clvaryonvg(0.522):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.522):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.522):datavg[updatefs:521] clodmget -q 'name = oraclelv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.525):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.525):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.525):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.525):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.526):datavg[updatefs:530] /usr/sbin/getlvcb -f oraclelv +epprd_rg:clvaryonvg(0.526):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.544):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(0.544):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(0.544):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.546):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.546):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false +epprd_rg:clvaryonvg(0.550):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.550):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.550):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.550):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.551):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.570):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.570):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.570):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.570):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.571):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(0.571):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.574):datavg[updatefs:545] /usr/sbin/imfs -lx oraclelv +epprd_rg:clvaryonvg(0.578):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.578):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.579):datavg[updatefs:521] clodmget -q 'name = epplv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.582):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.582):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.582):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.582):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(0.582):datavg[updatefs:530] /usr/sbin/getlvcb -f epplv +epprd_rg:clvaryonvg(0.583):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.601):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(0.601):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(0.601):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.602):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.603):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false +epprd_rg:clvaryonvg(0.606):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.606):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.606):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.606):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.608):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.628):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.628):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.628):datavg[updatefs:537] : 2. 
Its LVCB is readable +epprd_rg:clvaryonvg(0.628):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.629):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(0.629):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.632):datavg[updatefs:545] /usr/sbin/imfs -lx epplv +epprd_rg:clvaryonvg(0.637):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.637):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.637):datavg[updatefs:521] clodmget -q 'name = oraarchlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.641):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.641):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.641):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.641):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(0.641):datavg[updatefs:530] /usr/sbin/getlvcb -f oraarchlv +epprd_rg:clvaryonvg(0.642):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.661):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(0.661):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(0.661):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.662):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.662):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false +epprd_rg:clvaryonvg(0.666):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.666):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.666):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.666):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.667):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.686):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.686):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.686):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.686):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.687):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(0.687):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.691):datavg[updatefs:545] /usr/sbin/imfs -lx oraarchlv +epprd_rg:clvaryonvg(0.695):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.695):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.695):datavg[updatefs:521] clodmget -q 'name = sapdata1lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.698):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.698):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.698):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.698):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.698):datavg[updatefs:530] /usr/sbin/getlvcb -f sapdata1lv +epprd_rg:clvaryonvg(0.699):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.717):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(0.717):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(0.717):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.719):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.719):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false +epprd_rg:clvaryonvg(0.722):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.722):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.722):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.722):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.724):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.743):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.743):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.743):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.743):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.744):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(0.744):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.747):datavg[updatefs:545] /usr/sbin/imfs -lx sapdata1lv +epprd_rg:clvaryonvg(0.751):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.751):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.751):datavg[updatefs:521] clodmget -q 'name = sapdata2lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.754):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.755):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.755):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.755):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(0.755):datavg[updatefs:530] /usr/sbin/getlvcb -f sapdata2lv +epprd_rg:clvaryonvg(0.756):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.773):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(0.773):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(0.773):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.775):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.775):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false +epprd_rg:clvaryonvg(0.778):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.778):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.778):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.778):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.780):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.799):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.799):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.799):datavg[updatefs:537] : 2. 
Its LVCB is readable +epprd_rg:clvaryonvg(0.799):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.800):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(0.800):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.803):datavg[updatefs:545] /usr/sbin/imfs -lx sapdata2lv +epprd_rg:clvaryonvg(0.807):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.807):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.807):datavg[updatefs:521] clodmget -q 'name = sapdata3lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.811):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.811):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.811):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.811):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(0.811):datavg[updatefs:530] /usr/sbin/getlvcb -f sapdata3lv +epprd_rg:clvaryonvg(0.812):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.829):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(0.829):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(0.829):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.831):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.831):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false +epprd_rg:clvaryonvg(0.835):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.835):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.835):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.835):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.836):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.855):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.855):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.855):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.855):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.856):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(0.856):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.859):datavg[updatefs:545] /usr/sbin/imfs -lx sapdata3lv +epprd_rg:clvaryonvg(0.863):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.863):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.863):datavg[updatefs:521] clodmget -q 'name = sapdata4lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.867):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.867):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.867):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.867):datavg[updatefs:528] : information to reconstruct it. 
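The same getlvcb / imfs -lx sequence now repeats, unchanged except for the LV name, for the remaining logical volumes (sapdata4lv, boardlv, origlogAlv, origlogBlv, mirrlogAlv, mirrlogBlv) and finally for the jfs2 log volume epprdaloglv, whose LVCB carries no vfs stanza and is therefore skipped. When reading a long trace like this by hand, it is often quicker to condense the loop to its one effective action per LV, assuming the default hacmp.out location:

    grep 'imfs -lx' /var/hacmp/log/hacmp.out    # one stanza deletion per file system LV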
+epprd_rg:clvaryonvg(0.867):datavg[updatefs:530] /usr/sbin/getlvcb -f sapdata4lv +epprd_rg:clvaryonvg(0.868):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.885):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(0.885):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(0.885):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.887):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.887):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false +epprd_rg:clvaryonvg(0.891):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.891):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.891):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.891):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.892):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.912):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.912):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.912):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.912):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.913):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(0.913):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.916):datavg[updatefs:545] /usr/sbin/imfs -lx sapdata4lv +epprd_rg:clvaryonvg(0.920):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.920):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.920):datavg[updatefs:521] clodmget -q 'name = boardlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.924):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.924):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.924):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.924):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(0.924):datavg[updatefs:530] /usr/sbin/getlvcb -f boardlv +epprd_rg:clvaryonvg(0.925):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.945):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(0.945):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(0.945):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.946):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.946):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false +epprd_rg:clvaryonvg(0.950):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.950):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.950):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.950):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.951):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.970):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.970):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.970):datavg[updatefs:537] : 2. 
Its LVCB is readable +epprd_rg:clvaryonvg(0.970):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.971):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(0.971):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.974):datavg[updatefs:545] /usr/sbin/imfs -lx boardlv +epprd_rg:clvaryonvg(0.979):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.979):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.979):datavg[updatefs:521] clodmget -q 'name = origlogAlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.982):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.982):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.982):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.982):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(0.982):datavg[updatefs:530] /usr/sbin/getlvcb -f origlogAlv +epprd_rg:clvaryonvg(0.983):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.000):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(1.000):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(1.000):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.002):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false +epprd_rg:clvaryonvg(1.003):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.007):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.007):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.007):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.007):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.008):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.026):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.026):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.026):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.026):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.027):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(1.028):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.031):datavg[updatefs:545] /usr/sbin/imfs -lx origlogAlv +epprd_rg:clvaryonvg(1.035):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.035):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.035):datavg[updatefs:521] clodmget -q 'name = origlogBlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.039):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.039):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.039):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.039):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(1.039):datavg[updatefs:530] /usr/sbin/getlvcb -f origlogBlv +epprd_rg:clvaryonvg(1.040):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.057):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(1.057):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(1.057):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.058):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false +epprd_rg:clvaryonvg(1.059):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.063):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.063):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.063):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.063):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.064):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.082):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.082):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.082):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.082):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.083):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(1.084):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.087):datavg[updatefs:545] /usr/sbin/imfs -lx origlogBlv +epprd_rg:clvaryonvg(1.091):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.091):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.091):datavg[updatefs:521] clodmget -q 'name = mirrlogAlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.094):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.094):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.094):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.094):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(1.094):datavg[updatefs:530] /usr/sbin/getlvcb -f mirrlogAlv +epprd_rg:clvaryonvg(1.095):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.111):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(1.111):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(1.111):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.113):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false +epprd_rg:clvaryonvg(1.114):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.117):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.117):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.117):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.118):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.119):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.136):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.136):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.136):datavg[updatefs:537] : 2. 
Its LVCB is readable +epprd_rg:clvaryonvg(1.136):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.137):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(1.139):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.141):datavg[updatefs:545] /usr/sbin/imfs -lx mirrlogAlv +epprd_rg:clvaryonvg(1.145):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.145):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.145):datavg[updatefs:521] clodmget -q 'name = mirrlogBlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.148):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.148):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.149):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.149):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(1.149):datavg[updatefs:530] /usr/sbin/getlvcb -f mirrlogBlv +epprd_rg:clvaryonvg(1.149):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.166):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(1.166):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(1.166):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.168):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false +epprd_rg:clvaryonvg(1.169):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.172):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.172):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.172):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.172):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.173):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.191):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.191):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.191):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.191):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.192):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(1.193):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.196):datavg[updatefs:545] /usr/sbin/imfs -lx mirrlogBlv +epprd_rg:clvaryonvg(1.200):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.200):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.200):datavg[updatefs:521] clodmget -q 'name = epprdaloglv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.203):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.203):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.203):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.203):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(1.203):datavg[updatefs:530] /usr/sbin/getlvcb -f epprdaloglv +epprd_rg:clvaryonvg(1.204):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.221):datavg[updatefs:530] fs_info=' ' +epprd_rg:clvaryonvg(1.221):datavg[updatefs:531] [[ -n ' ' ]] +epprd_rg:clvaryonvg(1.221):datavg[updatefs:531] [[ ' ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.221):datavg[updatefs:552] [[ -n true ]] +epprd_rg:clvaryonvg(1.221):datavg[updatefs:556] : Pick up any file system changes that may have happened when +epprd_rg:clvaryonvg(1.221):datavg[updatefs:557] : the volume group was owned by another node. That is, if a +epprd_rg:clvaryonvg(1.221):datavg[updatefs:558] : local change was made - not through C-SPOC, we whould have no +epprd_rg:clvaryonvg(1.221):datavg[updatefs:559] : indication it happened. +epprd_rg:clvaryonvg(1.221):datavg[updatefs:561] [[ -z '' ]] +epprd_rg:clvaryonvg(1.221):datavg[updatefs:563] /usr/sbin/imfs datavg +epprd_rg:clvaryonvg(1.888):datavg[updatefs:589] : For a valid file system configuration, the mount point in +epprd_rg:clvaryonvg(1.888):datavg[updatefs:590] : /etc/filesystems for the logical volume should match the +epprd_rg:clvaryonvg(1.888):datavg[updatefs:591] : label of the logical volume. The above imfs should have +epprd_rg:clvaryonvg(1.888):datavg[updatefs:592] : matched those two. Now, check that they match the label +epprd_rg:clvaryonvg(1.888):datavg[updatefs:593] : for the logical volume as saved in ODM. +epprd_rg:clvaryonvg(1.888):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.888):datavg[updatefs:600] clodmget -q 'name = saplv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.892):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(1.892):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(1.892):datavg[updatefs:607] /usr/sbin/getlvcb -f saplv +epprd_rg:clvaryonvg(1.909):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(1.909):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(1.909):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(1.909):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(1.909):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(1.909):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(1.909):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.909):datavg[updatefs:623] : Label and file system type from LVCB on disk for saplv +epprd_rg:clvaryonvg(1.910):datavg[updatefs:625] getlvcb -T -A saplv +epprd_rg:clvaryonvg(1.910):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(1.913):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(1.916):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(1.918):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(1.932):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(1.932):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(1.932):datavg[updatefs:632] : Mount point in /etc/filesystems for saplv +epprd_rg:clvaryonvg(1.934):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/saplv$' /etc/filesystems +epprd_rg:clvaryonvg(1.936):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(1.938):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(1.941):datavg[updatefs:634] 
fs_mount_point=/usr/sap +epprd_rg:clvaryonvg(1.941):datavg[updatefs:637] : CuAt label attribute for saplv +epprd_rg:clvaryonvg(1.941):datavg[updatefs:639] clodmget -q 'name = saplv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(1.944):datavg[updatefs:639] CuAt_label=/usr/sap +epprd_rg:clvaryonvg(1.946):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(1.947):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(1.950):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(1.950):datavg[updatefs:657] [[ -z /usr/sap ]] +epprd_rg:clvaryonvg(1.950):datavg[updatefs:657] [[ /usr/sap == None ]] +epprd_rg:clvaryonvg(1.950):datavg[updatefs:665] [[ /usr/sap == /usr/sap ]] +epprd_rg:clvaryonvg(1.950):datavg[updatefs:665] [[ /usr/sap != /usr/sap ]] +epprd_rg:clvaryonvg(1.950):datavg[updatefs:685] [[ /usr/sap != /usr/sap ]] +epprd_rg:clvaryonvg(1.950):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.950):datavg[updatefs:600] clodmget -q 'name = sapmntlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.953):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(1.954):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(1.954):datavg[updatefs:607] /usr/sbin/getlvcb -f sapmntlv +epprd_rg:clvaryonvg(1.971):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(1.971):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(1.971):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(1.971):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(1.971):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(1.971):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(1.971):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.971):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapmntlv +epprd_rg:clvaryonvg(1.972):datavg[updatefs:625] getlvcb -T -A sapmntlv +epprd_rg:clvaryonvg(1.972):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(1.975):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(1.978):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(1.980):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(1.993):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(1.993):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(1.993):datavg[updatefs:632] : Mount point in /etc/filesystems for sapmntlv +epprd_rg:clvaryonvg(1.995):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapmntlv$' /etc/filesystems +epprd_rg:clvaryonvg(1.997):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(1.999):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.002):datavg[updatefs:634] fs_mount_point=/sapmnt +epprd_rg:clvaryonvg(2.002):datavg[updatefs:637] : CuAt label attribute for sapmntlv +epprd_rg:clvaryonvg(2.002):datavg[updatefs:639] clodmget -q 'name = sapmntlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.006):datavg[updatefs:639] CuAt_label=/sapmnt +epprd_rg:clvaryonvg(2.007):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.008):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.011):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.012):datavg[updatefs:657] [[ -z /sapmnt ]] +epprd_rg:clvaryonvg(2.012):datavg[updatefs:657] [[ /sapmnt == None ]] 
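With the stanzas rebuilt by 'imfs datavg', clvaryonvg now verifies, LV by LV, that three views of each mount point agree: the label stored in the LVCB on disk, the stanza name in /etc/filesystems, and the label attribute in CuAt. For saplv all three are /usr/sap, so nothing is rewritten, and the same holds for sapmntlv (/sapmnt) below. One quirk worth noting in the trace: the multiple-label guard runs 'print -- CuAt_label | wc -l' on the literal word rather than on $CuAt_label, so that check always counts exactly one line. The three lookups are, approximately:

    lv=saplv
    LC_ALL=C getlvcb -T -A $lv | egrep -w 'label =|type =' | paste -s - - |
        read skip skip lvcb_label skip skip lvcb_type rest   # ksh: read runs in this shell
    fs_mount_point=$(egrep -p "^([[:space:]])*dev([[:space:]])*= /dev/$lv\$" \
                     /etc/filesystems | head -1 | cut -f1 -d:)  # AIX egrep -p: paragraph mode
    CuAt_label=$(clodmget -q "name = $lv and attribute = label" -f value -n CuAt)
    [[ $lvcb_label == "$fs_mount_point" && $CuAt_label == "$fs_mount_point" ]] ||
        print -- "$lv: mount point records disagree"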
+epprd_rg:clvaryonvg(2.012):datavg[updatefs:665] [[ /sapmnt == /sapmnt ]] +epprd_rg:clvaryonvg(2.012):datavg[updatefs:665] [[ /sapmnt != /sapmnt ]] +epprd_rg:clvaryonvg(2.012):datavg[updatefs:685] [[ /sapmnt != /sapmnt ]] +epprd_rg:clvaryonvg(2.012):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.012):datavg[updatefs:600] clodmget -q 'name = oraclelv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.015):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.015):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.015):datavg[updatefs:607] /usr/sbin/getlvcb -f oraclelv +epprd_rg:clvaryonvg(2.032):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(2.032):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.032):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.032):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.032):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.032):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(2.032):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.032):datavg[updatefs:623] : Label and file system type from LVCB on disk for oraclelv +epprd_rg:clvaryonvg(2.033):datavg[updatefs:625] getlvcb -T -A oraclelv +epprd_rg:clvaryonvg(2.033):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.037):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.040):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.042):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.054):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.054):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.054):datavg[updatefs:632] : Mount point in /etc/filesystems for oraclelv +epprd_rg:clvaryonvg(2.056):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/oraclelv$' /etc/filesystems +epprd_rg:clvaryonvg(2.058):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.060):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.063):datavg[updatefs:634] fs_mount_point=/oracle +epprd_rg:clvaryonvg(2.063):datavg[updatefs:637] : CuAt label attribute for oraclelv +epprd_rg:clvaryonvg(2.063):datavg[updatefs:639] clodmget -q 'name = oraclelv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.067):datavg[updatefs:639] CuAt_label=/oracle +epprd_rg:clvaryonvg(2.068):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.069):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.072):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.072):datavg[updatefs:657] [[ -z /oracle ]] +epprd_rg:clvaryonvg(2.073):datavg[updatefs:657] [[ /oracle == None ]] +epprd_rg:clvaryonvg(2.073):datavg[updatefs:665] [[ /oracle == /oracle ]] +epprd_rg:clvaryonvg(2.073):datavg[updatefs:665] [[ /oracle != /oracle ]] +epprd_rg:clvaryonvg(2.073):datavg[updatefs:685] [[ /oracle != /oracle ]] +epprd_rg:clvaryonvg(2.073):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.073):datavg[updatefs:600] clodmget -q 'name = epplv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.076):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.076):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails 
+epprd_rg:clvaryonvg(2.076):datavg[updatefs:607] /usr/sbin/getlvcb -f epplv +epprd_rg:clvaryonvg(2.093):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(2.094):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.094):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.094):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.094):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.094):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(2.094):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.094):datavg[updatefs:623] : Label and file system type from LVCB on disk for epplv +epprd_rg:clvaryonvg(2.095):datavg[updatefs:625] getlvcb -T -A epplv +epprd_rg:clvaryonvg(2.095):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.098):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.101):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.103):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.115):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.115):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.115):datavg[updatefs:632] : Mount point in /etc/filesystems for epplv +epprd_rg:clvaryonvg(2.117):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/epplv$' /etc/filesystems +epprd_rg:clvaryonvg(2.119):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.121):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.124):datavg[updatefs:634] fs_mount_point=/oracle/EPP +epprd_rg:clvaryonvg(2.124):datavg[updatefs:637] : CuAt label attribute for epplv +epprd_rg:clvaryonvg(2.124):datavg[updatefs:639] clodmget -q 'name = epplv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.128):datavg[updatefs:639] CuAt_label=/oracle/EPP +epprd_rg:clvaryonvg(2.129):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.130):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.134):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.134):datavg[updatefs:657] [[ -z /oracle/EPP ]] +epprd_rg:clvaryonvg(2.134):datavg[updatefs:657] [[ /oracle/EPP == None ]] +epprd_rg:clvaryonvg(2.134):datavg[updatefs:665] [[ /oracle/EPP == /oracle/EPP ]] +epprd_rg:clvaryonvg(2.134):datavg[updatefs:665] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:clvaryonvg(2.134):datavg[updatefs:685] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:clvaryonvg(2.134):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.134):datavg[updatefs:600] clodmget -q 'name = oraarchlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.137):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.137):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.137):datavg[updatefs:607] /usr/sbin/getlvcb -f oraarchlv +epprd_rg:clvaryonvg(2.154):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(2.154):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.154):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.154):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.155):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.155):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] 
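
getlvcb -f prints the filesystem attributes stored in the logical volume control block, which is what populates fs_info in each iteration. In this trace a jfs2 data LV returns a vfs string, while the jfs2 log device returns only whitespace:

    /usr/sbin/getlvcb -f epplv
    #   vfs=jfs2:log=/dev/epprdaloglv:account=false
    /usr/sbin/getlvcb -f epprdaloglv
    #   (whitespace only - no filesystem, so updatefs reaches 'continue'
    #    at line 620 when it gets to epprdaloglv further below)
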
+epprd_rg:clvaryonvg(2.155):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.155):datavg[updatefs:623] : Label and file system type from LVCB on disk for oraarchlv +epprd_rg:clvaryonvg(2.155):datavg[updatefs:625] getlvcb -T -A oraarchlv +epprd_rg:clvaryonvg(2.156):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.159):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.162):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.164):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.177):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.177):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.177):datavg[updatefs:632] : Mount point in /etc/filesystems for oraarchlv +epprd_rg:clvaryonvg(2.178):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/oraarchlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.181):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.183):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.186):datavg[updatefs:634] fs_mount_point=/oracle/EPP/oraarch +epprd_rg:clvaryonvg(2.186):datavg[updatefs:637] : CuAt label attribute for oraarchlv +epprd_rg:clvaryonvg(2.186):datavg[updatefs:639] clodmget -q 'name = oraarchlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.189):datavg[updatefs:639] CuAt_label=/oracle/EPP/oraarch +epprd_rg:clvaryonvg(2.191):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.191):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.195):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.195):datavg[updatefs:657] [[ -z /oracle/EPP/oraarch ]] +epprd_rg:clvaryonvg(2.195):datavg[updatefs:657] [[ /oracle/EPP/oraarch == None ]] +epprd_rg:clvaryonvg(2.195):datavg[updatefs:665] [[ /oracle/EPP/oraarch == /oracle/EPP/oraarch ]] +epprd_rg:clvaryonvg(2.196):datavg[updatefs:665] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:clvaryonvg(2.196):datavg[updatefs:685] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:clvaryonvg(2.196):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.196):datavg[updatefs:600] clodmget -q 'name = sapdata1lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.199):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.199):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.199):datavg[updatefs:607] /usr/sbin/getlvcb -f sapdata1lv +epprd_rg:clvaryonvg(2.216):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(2.216):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.216):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.216):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.216):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.216):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(2.216):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.216):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapdata1lv +epprd_rg:clvaryonvg(2.217):datavg[updatefs:625] getlvcb -T -A sapdata1lv +epprd_rg:clvaryonvg(2.217):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.221):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.224):datavg[updatefs:625] paste -s - 
- +epprd_rg:clvaryonvg(2.225):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.238):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.238):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.238):datavg[updatefs:632] : Mount point in /etc/filesystems for sapdata1lv +epprd_rg:clvaryonvg(2.240):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapdata1lv$' /etc/filesystems +epprd_rg:clvaryonvg(2.242):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.244):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.247):datavg[updatefs:634] fs_mount_point=/oracle/EPP/sapdata1 +epprd_rg:clvaryonvg(2.247):datavg[updatefs:637] : CuAt label attribute for sapdata1lv +epprd_rg:clvaryonvg(2.247):datavg[updatefs:639] clodmget -q 'name = sapdata1lv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.250):datavg[updatefs:639] CuAt_label=/oracle/EPP/sapdata1 +epprd_rg:clvaryonvg(2.252):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.253):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.256):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.256):datavg[updatefs:657] [[ -z /oracle/EPP/sapdata1 ]] +epprd_rg:clvaryonvg(2.256):datavg[updatefs:657] [[ /oracle/EPP/sapdata1 == None ]] +epprd_rg:clvaryonvg(2.256):datavg[updatefs:665] [[ /oracle/EPP/sapdata1 == /oracle/EPP/sapdata1 ]] +epprd_rg:clvaryonvg(2.256):datavg[updatefs:665] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:clvaryonvg(2.256):datavg[updatefs:685] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:clvaryonvg(2.256):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.256):datavg[updatefs:600] clodmget -q 'name = sapdata2lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.260):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.260):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.260):datavg[updatefs:607] /usr/sbin/getlvcb -f sapdata2lv +epprd_rg:clvaryonvg(2.277):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(2.277):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.277):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.277):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.277):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.277):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(2.277):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.277):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapdata2lv +epprd_rg:clvaryonvg(2.278):datavg[updatefs:625] getlvcb -T -A sapdata2lv +epprd_rg:clvaryonvg(2.279):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.282):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.285):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.287):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.300):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.300):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.300):datavg[updatefs:632] : Mount point in /etc/filesystems for sapdata2lv +epprd_rg:clvaryonvg(2.302):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.303):datavg[updatefs:634] egrep -p 
'^([[:space:]])*dev([[:space:]])*= /dev/sapdata2lv$' /etc/filesystems +epprd_rg:clvaryonvg(2.305):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.309):datavg[updatefs:634] fs_mount_point=/oracle/EPP/sapdata2 +epprd_rg:clvaryonvg(2.309):datavg[updatefs:637] : CuAt label attribute for sapdata2lv +epprd_rg:clvaryonvg(2.309):datavg[updatefs:639] clodmget -q 'name = sapdata2lv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.312):datavg[updatefs:639] CuAt_label=/oracle/EPP/sapdata2 +epprd_rg:clvaryonvg(2.314):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.315):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.318):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.318):datavg[updatefs:657] [[ -z /oracle/EPP/sapdata2 ]] +epprd_rg:clvaryonvg(2.318):datavg[updatefs:657] [[ /oracle/EPP/sapdata2 == None ]] +epprd_rg:clvaryonvg(2.318):datavg[updatefs:665] [[ /oracle/EPP/sapdata2 == /oracle/EPP/sapdata2 ]] +epprd_rg:clvaryonvg(2.318):datavg[updatefs:665] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:clvaryonvg(2.318):datavg[updatefs:685] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:clvaryonvg(2.318):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.318):datavg[updatefs:600] clodmget -q 'name = sapdata3lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.321):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.321):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.321):datavg[updatefs:607] /usr/sbin/getlvcb -f sapdata3lv +epprd_rg:clvaryonvg(2.339):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(2.339):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.339):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.339):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.339):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.339):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(2.339):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.339):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapdata3lv +epprd_rg:clvaryonvg(2.340):datavg[updatefs:625] getlvcb -T -A sapdata3lv +epprd_rg:clvaryonvg(2.340):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.344):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.346):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.348):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.362):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.362):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.362):datavg[updatefs:632] : Mount point in /etc/filesystems for sapdata3lv +epprd_rg:clvaryonvg(2.363):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapdata3lv$' /etc/filesystems +epprd_rg:clvaryonvg(2.366):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.367):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.370):datavg[updatefs:634] fs_mount_point=/oracle/EPP/sapdata3 +epprd_rg:clvaryonvg(2.370):datavg[updatefs:637] : CuAt label attribute for sapdata3lv +epprd_rg:clvaryonvg(2.370):datavg[updatefs:639] clodmget -q 'name = sapdata3lv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.374):datavg[updatefs:639] 
CuAt_label=/oracle/EPP/sapdata3 +epprd_rg:clvaryonvg(2.375):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.377):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.380):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.380):datavg[updatefs:657] [[ -z /oracle/EPP/sapdata3 ]] +epprd_rg:clvaryonvg(2.380):datavg[updatefs:657] [[ /oracle/EPP/sapdata3 == None ]] +epprd_rg:clvaryonvg(2.380):datavg[updatefs:665] [[ /oracle/EPP/sapdata3 == /oracle/EPP/sapdata3 ]] +epprd_rg:clvaryonvg(2.380):datavg[updatefs:665] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:clvaryonvg(2.380):datavg[updatefs:685] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:clvaryonvg(2.380):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.380):datavg[updatefs:600] clodmget -q 'name = sapdata4lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.383):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.383):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.383):datavg[updatefs:607] /usr/sbin/getlvcb -f sapdata4lv +epprd_rg:clvaryonvg(2.401):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(2.401):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.401):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.401):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.401):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.401):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(2.401):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.401):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapdata4lv +epprd_rg:clvaryonvg(2.402):datavg[updatefs:625] getlvcb -T -A sapdata4lv +epprd_rg:clvaryonvg(2.402):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.405):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.408):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.410):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.423):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.423):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.423):datavg[updatefs:632] : Mount point in /etc/filesystems for sapdata4lv +epprd_rg:clvaryonvg(2.425):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapdata4lv$' /etc/filesystems +epprd_rg:clvaryonvg(2.427):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.429):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.432):datavg[updatefs:634] fs_mount_point=/oracle/EPP/sapdata4 +epprd_rg:clvaryonvg(2.432):datavg[updatefs:637] : CuAt label attribute for sapdata4lv +epprd_rg:clvaryonvg(2.432):datavg[updatefs:639] clodmget -q 'name = sapdata4lv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.435):datavg[updatefs:639] CuAt_label=/oracle/EPP/sapdata4 +epprd_rg:clvaryonvg(2.437):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.438):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.441):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.442):datavg[updatefs:657] [[ -z /oracle/EPP/sapdata4 ]] +epprd_rg:clvaryonvg(2.442):datavg[updatefs:657] [[ /oracle/EPP/sapdata4 == None ]] +epprd_rg:clvaryonvg(2.442):datavg[updatefs:665] [[ /oracle/EPP/sapdata4 == /oracle/EPP/sapdata4 ]] 
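
The fs_mount_point extraction relies on AIX egrep's paragraph mode: -p prints the whole /etc/filesystems stanza containing the matched dev line, head -1 keeps the stanza's first line, and cut takes the text before the colon, which is the mount point. Assuming a stanza of the usual form (hypothetical, but consistent with the values traced):

    /oracle/EPP/sapdata4:
            dev             = /dev/sapdata4lv
            vfs             = jfs2
            log             = /dev/epprdaloglv
            mount           = false

    egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapdata4lv$' /etc/filesystems |
        head -1 | cut -f1 -d:
    #   /oracle/EPP/sapdata4
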
+epprd_rg:clvaryonvg(2.442):datavg[updatefs:665] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:clvaryonvg(2.442):datavg[updatefs:685] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:clvaryonvg(2.442):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.442):datavg[updatefs:600] clodmget -q 'name = boardlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.445):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.445):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.445):datavg[updatefs:607] /usr/sbin/getlvcb -f boardlv +epprd_rg:clvaryonvg(2.463):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(2.463):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.463):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.463):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.463):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.463):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(2.463):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.463):datavg[updatefs:623] : Label and file system type from LVCB on disk for boardlv +epprd_rg:clvaryonvg(2.464):datavg[updatefs:625] getlvcb -T -A boardlv +epprd_rg:clvaryonvg(2.464):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.467):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.470):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.472):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.485):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.485):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.485):datavg[updatefs:632] : Mount point in /etc/filesystems for boardlv +epprd_rg:clvaryonvg(2.486):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/boardlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.489):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.490):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.494):datavg[updatefs:634] fs_mount_point=/board_org +epprd_rg:clvaryonvg(2.494):datavg[updatefs:637] : CuAt label attribute for boardlv +epprd_rg:clvaryonvg(2.494):datavg[updatefs:639] clodmget -q 'name = boardlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.497):datavg[updatefs:639] CuAt_label=/board_org +epprd_rg:clvaryonvg(2.499):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.500):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.503):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.503):datavg[updatefs:657] [[ -z /board_org ]] +epprd_rg:clvaryonvg(2.503):datavg[updatefs:657] [[ /board_org == None ]] +epprd_rg:clvaryonvg(2.503):datavg[updatefs:665] [[ /board_org == /board_org ]] +epprd_rg:clvaryonvg(2.503):datavg[updatefs:665] [[ /board_org != /board_org ]] +epprd_rg:clvaryonvg(2.503):datavg[updatefs:685] [[ /board_org != /board_org ]] +epprd_rg:clvaryonvg(2.503):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.503):datavg[updatefs:600] clodmget -q 'name = origlogAlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.507):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.507):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails 
+epprd_rg:clvaryonvg(2.507):datavg[updatefs:607] /usr/sbin/getlvcb -f origlogAlv +epprd_rg:clvaryonvg(2.524):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(2.524):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.524):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.524):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.524):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.524):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(2.524):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.524):datavg[updatefs:623] : Label and file system type from LVCB on disk for origlogAlv +epprd_rg:clvaryonvg(2.525):datavg[updatefs:625] getlvcb -T -A origlogAlv +epprd_rg:clvaryonvg(2.525):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.529):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.532):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.534):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.546):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.546):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.546):datavg[updatefs:632] : Mount point in /etc/filesystems for origlogAlv +epprd_rg:clvaryonvg(2.548):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/origlogAlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.550):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.551):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.555):datavg[updatefs:634] fs_mount_point=/oracle/EPP/origlogA +epprd_rg:clvaryonvg(2.555):datavg[updatefs:637] : CuAt label attribute for origlogAlv +epprd_rg:clvaryonvg(2.556):datavg[updatefs:639] clodmget -q 'name = origlogAlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.559):datavg[updatefs:639] CuAt_label=/oracle/EPP/origlogA +epprd_rg:clvaryonvg(2.560):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.561):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.565):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.565):datavg[updatefs:657] [[ -z /oracle/EPP/origlogA ]] +epprd_rg:clvaryonvg(2.565):datavg[updatefs:657] [[ /oracle/EPP/origlogA == None ]] +epprd_rg:clvaryonvg(2.565):datavg[updatefs:665] [[ /oracle/EPP/origlogA == /oracle/EPP/origlogA ]] +epprd_rg:clvaryonvg(2.565):datavg[updatefs:665] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:clvaryonvg(2.565):datavg[updatefs:685] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:clvaryonvg(2.565):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.565):datavg[updatefs:600] clodmget -q 'name = origlogBlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.568):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.568):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.569):datavg[updatefs:607] /usr/sbin/getlvcb -f origlogBlv +epprd_rg:clvaryonvg(2.586):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(2.586):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.586):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.586):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.586):datavg[updatefs:615] : Skip logical volumes not associated with file systems 
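
The label/type parse seen in every iteration works because getlvcb -T -A prints one attribute per line; egrep -w keeps only the label and type lines, paste -s - - joins that pair onto a single line, and a single read then splits the six fields. For origlogAlv, just traced above:

    LC_ALL=C getlvcb -T -A origlogAlv | egrep -w 'label =|type =' | paste -s - -
    #   label = /oracle/EPP/origlogA    type = jfs2

    # ... | read skip skip lvcb_label skip skip lvcb_type rest
    # leaves lvcb_label=/oracle/EPP/origlogA and lvcb_type=jfs2
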
+epprd_rg:clvaryonvg(2.586):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(2.586):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.586):datavg[updatefs:623] : Label and file system type from LVCB on disk for origlogBlv +epprd_rg:clvaryonvg(2.587):datavg[updatefs:625] getlvcb -T -A origlogBlv +epprd_rg:clvaryonvg(2.587):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.590):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.593):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.595):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.608):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.608):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.608):datavg[updatefs:632] : Mount point in /etc/filesystems for origlogBlv +epprd_rg:clvaryonvg(2.609):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/origlogBlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.612):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.613):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.617):datavg[updatefs:634] fs_mount_point=/oracle/EPP/origlogB +epprd_rg:clvaryonvg(2.617):datavg[updatefs:637] : CuAt label attribute for origlogBlv +epprd_rg:clvaryonvg(2.617):datavg[updatefs:639] clodmget -q 'name = origlogBlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.620):datavg[updatefs:639] CuAt_label=/oracle/EPP/origlogB +epprd_rg:clvaryonvg(2.621):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.623):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.626):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.626):datavg[updatefs:657] [[ -z /oracle/EPP/origlogB ]] +epprd_rg:clvaryonvg(2.626):datavg[updatefs:657] [[ /oracle/EPP/origlogB == None ]] +epprd_rg:clvaryonvg(2.626):datavg[updatefs:665] [[ /oracle/EPP/origlogB == /oracle/EPP/origlogB ]] +epprd_rg:clvaryonvg(2.626):datavg[updatefs:665] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:clvaryonvg(2.626):datavg[updatefs:685] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:clvaryonvg(2.626):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.626):datavg[updatefs:600] clodmget -q 'name = mirrlogAlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.629):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.629):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.629):datavg[updatefs:607] /usr/sbin/getlvcb -f mirrlogAlv +epprd_rg:clvaryonvg(2.647):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(2.647):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.647):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.647):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.647):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.647):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(2.647):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.647):datavg[updatefs:623] : Label and file system type from LVCB on disk for mirrlogAlv +epprd_rg:clvaryonvg(2.648):datavg[updatefs:625] getlvcb -T -A mirrlogAlv +epprd_rg:clvaryonvg(2.648):datavg[updatefs:625] LC_ALL=C 
+epprd_rg:clvaryonvg(2.651):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.654):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.656):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.672):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.672):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.672):datavg[updatefs:632] : Mount point in /etc/filesystems for mirrlogAlv +epprd_rg:clvaryonvg(2.674):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/mirrlogAlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.676):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.678):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.681):datavg[updatefs:634] fs_mount_point=/oracle/EPP/mirrlogA +epprd_rg:clvaryonvg(2.681):datavg[updatefs:637] : CuAt label attribute for mirrlogAlv +epprd_rg:clvaryonvg(2.681):datavg[updatefs:639] clodmget -q 'name = mirrlogAlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.684):datavg[updatefs:639] CuAt_label=/oracle/EPP/mirrlogA +epprd_rg:clvaryonvg(2.686):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.687):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.690):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.690):datavg[updatefs:657] [[ -z /oracle/EPP/mirrlogA ]] +epprd_rg:clvaryonvg(2.690):datavg[updatefs:657] [[ /oracle/EPP/mirrlogA == None ]] +epprd_rg:clvaryonvg(2.690):datavg[updatefs:665] [[ /oracle/EPP/mirrlogA == /oracle/EPP/mirrlogA ]] +epprd_rg:clvaryonvg(2.690):datavg[updatefs:665] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:clvaryonvg(2.690):datavg[updatefs:685] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:clvaryonvg(2.690):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.690):datavg[updatefs:600] clodmget -q 'name = mirrlogBlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.694):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.694):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.694):datavg[updatefs:607] /usr/sbin/getlvcb -f mirrlogBlv +epprd_rg:clvaryonvg(2.711):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false ' +epprd_rg:clvaryonvg(2.711):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.711):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.711):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.711):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.711):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false ' ]] +epprd_rg:clvaryonvg(2.711):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.711):datavg[updatefs:623] : Label and file system type from LVCB on disk for mirrlogBlv +epprd_rg:clvaryonvg(2.712):datavg[updatefs:625] getlvcb -T -A mirrlogBlv +epprd_rg:clvaryonvg(2.713):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.716):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.719):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.721):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.734):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.734):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.734):datavg[updatefs:632] : Mount point in /etc/filesystems for mirrlogBlv 
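
The print/wc pair after each CuAt lookup is meant as a single-record guard: the ODM should hold exactly one label attribute per LV, since more than one would make the result ambiguous. Note, though, that the trace shows the literal string CuAt_label being printed rather than its value, so the count is always 1 and the guard cannot fire as written. The apparent intent, as a sketch:

    print -- "$CuAt_label" | wc -l | read count
    (( count != 1 )) && : # ambiguous ODM label - handling not shown in this excerpt
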
+epprd_rg:clvaryonvg(2.735):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/mirrlogBlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.738):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.739):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.743):datavg[updatefs:634] fs_mount_point=/oracle/EPP/mirrlogB +epprd_rg:clvaryonvg(2.743):datavg[updatefs:637] : CuAt label attribute for mirrlogBlv +epprd_rg:clvaryonvg(2.743):datavg[updatefs:639] clodmget -q 'name = mirrlogBlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.746):datavg[updatefs:639] CuAt_label=/oracle/EPP/mirrlogB +epprd_rg:clvaryonvg(2.748):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.749):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.752):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.752):datavg[updatefs:657] [[ -z /oracle/EPP/mirrlogB ]] +epprd_rg:clvaryonvg(2.752):datavg[updatefs:657] [[ /oracle/EPP/mirrlogB == None ]] +epprd_rg:clvaryonvg(2.752):datavg[updatefs:665] [[ /oracle/EPP/mirrlogB == /oracle/EPP/mirrlogB ]] +epprd_rg:clvaryonvg(2.752):datavg[updatefs:665] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:clvaryonvg(2.752):datavg[updatefs:685] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:clvaryonvg(2.752):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.752):datavg[updatefs:600] clodmget -q 'name = epprdaloglv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.756):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.756):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.756):datavg[updatefs:607] /usr/sbin/getlvcb -f epprdaloglv +epprd_rg:clvaryonvg(2.773):datavg[updatefs:607] fs_info=' ' +epprd_rg:clvaryonvg(2.773):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.773):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.773):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.773):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.773):datavg[updatefs:618] [[ -z ' ' ]] +epprd_rg:clvaryonvg(2.773):datavg[updatefs:618] [[ ' ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.773):datavg[updatefs:620] continue +epprd_rg:clvaryonvg(2.773):datavg[1641] : At this point, the volume should be varied on, so get the current +epprd_rg:clvaryonvg(2.773):datavg[1642] : timestamp if needed +epprd_rg:clvaryonvg(2.773):datavg[1644] vgdatimestamps +epprd_rg:clvaryonvg(2.773):datavg[vgdatimestamps:201] PS4_FUNC=vgdatimestamps +epprd_rg:clvaryonvg(2.773):datavg[vgdatimestamps:201] typeset PS4_FUNC +epprd_rg:clvaryonvg(2.773):datavg[vgdatimestamps:202] [[ high == high ]] +epprd_rg:clvaryonvg(2.773):datavg[vgdatimestamps:202] set -x +epprd_rg:clvaryonvg(2.773):datavg[vgdatimestamps:203] set -u +epprd_rg:clvaryonvg(2.773):datavg[vgdatimestamps:206] : See what timestamp LVM has recorded from the last time it checked +epprd_rg:clvaryonvg(2.773):datavg[vgdatimestamps:207] : the disks +epprd_rg:clvaryonvg(2.773):datavg[vgdatimestamps:209] /usr/sbin/getlvodm -T 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(2.774):datavg[vgdatimestamps:209] 2> /dev/null +epprd_rg:clvaryonvg(2.777):datavg[vgdatimestamps:209] TS_FROM_ODM=63d4d87b2421bec0 +epprd_rg:clvaryonvg(2.777):datavg[vgdatimestamps:212] : Check to see if HACMP is maintaining a timestamp for this volume group +epprd_rg:clvaryonvg(2.777):datavg[vgdatimestamps:213] : Needed for some older volume groups 
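
vgdatimestamps, condensed: the VGDA timestamp LVM last recorded in the ODM is compared with the timestamp actually on disk (read just below via clvgdats), and an empty value on either side would force a refresh. A sketch with the values from this trace:

    TS_FROM_ODM=$(/usr/sbin/getlvodm -T 00c44af100004b00000001851e9dc053 2>/dev/null)   # 63d4d87b2421bec0
    TS_FROM_DISK=$(clvgdats /dev/datavg 2>/dev/null)                                    # 63d4d87b2421bec0
    # Matching, non-empty timestamps mean the local ODM copy of datavg is
    # current; a mismatch would send clvaryonvg down its ODM-update path,
    # which this trace does not exercise.
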
+epprd_rg:clvaryonvg(2.777):datavg[vgdatimestamps:215] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.tstamp ]] +epprd_rg:clvaryonvg(2.777):datavg[vgdatimestamps:234] : Get the time stamp from the actual disk +epprd_rg:clvaryonvg(2.777):datavg[vgdatimestamps:236] clvgdats /dev/datavg +epprd_rg:clvaryonvg(2.777):datavg[vgdatimestamps:236] 2> /dev/null +epprd_rg:clvaryonvg(2.787):datavg[vgdatimestamps:236] TS_FROM_DISK=63d4d87b2421bec0 +epprd_rg:clvaryonvg(2.787):datavg[vgdatimestamps:237] clvgdats_rc=0 +epprd_rg:clvaryonvg(2.787):datavg[vgdatimestamps:238] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.787):datavg[vgdatimestamps:247] [[ -z 63d4d87b2421bec0 ]] +epprd_rg:clvaryonvg(2.787):datavg[1645] [[ -z 63d4d87b2421bec0 ]] +epprd_rg:clvaryonvg(2.787):datavg[1656] : Finally, leave the volume in the requested state - on or off +epprd_rg:clvaryonvg(2.787):datavg[1658] [[ FALSE == TRUE ]] +epprd_rg:clvaryonvg(2.787):datavg[1665] (( 0 == 0 )) +epprd_rg:clvaryonvg(2.787):datavg[1668] : Synchronize time stamps globally +epprd_rg:clvaryonvg(2.787):datavg[1670] cl_update_vg_odm_ts -o datavg +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[77] version=1.13 +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[121] o_flag='' +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[122] f_flag='' +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[123] getopts :of option +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[126] : Local timestamps should be good, since volume group was +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[127] : just varied on or off +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[128] o_flag=TRUE +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[123] getopts :of option +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[142] shift 1 +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[144] vg_name=datavg +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[145] [[ -z datavg ]] +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[151] shift +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[152] node_list='' +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[153] /usr/es/sbin/cluster/utilities/cl_get_path all +epprd_rg:cl_update_vg_odm_ts(0.004):datavg[153] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin +epprd_rg:cl_update_vg_odm_ts(0.004):datavg[155] [[ -z '' ]] +epprd_rg:cl_update_vg_odm_ts(0.004):datavg[158] : Check to see if this update is necessary - some LVM levels automatically +epprd_rg:cl_update_vg_odm_ts(0.004):datavg[159] : update volume group timestamps clusterwide.
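
The level check that follows builds one fixed-width integer from the bos.rte.lvm VRMF so a single arithmetic comparison suffices: V and R are zero-padded to two digits, M and F to three. With 7.2.5.101 installed that yields 0702005101, at or above lvm_lvl7 (7.1.3.46 = 701003046), so this LVM already propagates volume group timestamps clusterwide and the script returns without doing anything. Condensed:

    typeset -li V R M F VRMF
    typeset -Z2 V R            # version and release padded to 2 digits
    typeset -Z3 M F            # modification and fix padded to 3 digits
    lslpp -lcqOr bos.rte.lvm | cut -f3 -d: | IFS=. read V R M F
    VRMF=$V$R$M$F              # 7.2.5.101 -> 0702005101
    (( VRMF >= 701003046 )) && return 0   # timestamp update unnecessary
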
+epprd_rg:cl_update_vg_odm_ts(0.004):datavg[163] instfix -iqk IV74100 +epprd_rg:cl_update_vg_odm_ts(0.005):datavg[163] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.012):datavg[164] instfix -iqk IV74883 +epprd_rg:cl_update_vg_odm_ts(0.012):datavg[164] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.019):datavg[165] instfix -iqk IV74698 +epprd_rg:cl_update_vg_odm_ts(0.020):datavg[165] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.026):datavg[166] instfix -iqk IV74246 +epprd_rg:cl_update_vg_odm_ts(0.027):datavg[166] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.034):datavg[174] emgr -l -L IV74883 +epprd_rg:cl_update_vg_odm_ts(0.035):datavg[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.302):datavg[174] emgr -l -L IV74698 +epprd_rg:cl_update_vg_odm_ts(0.303):datavg[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.571):datavg[174] emgr -l -L IV74246 +epprd_rg:cl_update_vg_odm_ts(0.571):datavg[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.840):datavg[183] : Each of the V, R, M and F fields is padded to fixed length, +epprd_rg:cl_update_vg_odm_ts(0.840):datavg[184] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_update_vg_odm_ts(0.840):datavg[185] : 99.99.999.999 +epprd_rg:cl_update_vg_odm_ts(0.840):datavg[187] typeset -li V R M F +epprd_rg:cl_update_vg_odm_ts(0.840):datavg[188] typeset -Z2 V +epprd_rg:cl_update_vg_odm_ts(0.840):datavg[189] typeset -Z2 R +epprd_rg:cl_update_vg_odm_ts(0.840):datavg[190] typeset -Z3 M +epprd_rg:cl_update_vg_odm_ts(0.840):datavg[191] typeset -Z3 F +epprd_rg:cl_update_vg_odm_ts(0.840):datavg[192] lvm_lvl6=601008015 +epprd_rg:cl_update_vg_odm_ts(0.840):datavg[192] typeset -li lvm_lvl6 +epprd_rg:cl_update_vg_odm_ts(0.840):datavg[194] lvm_lvl7=701003046 +epprd_rg:cl_update_vg_odm_ts(0.840):datavg[194] typeset -li lvm_lvl7 +epprd_rg:cl_update_vg_odm_ts(0.840):datavg[195] VRMF=0 +epprd_rg:cl_update_vg_odm_ts(0.840):datavg[195] typeset -li VRMF +epprd_rg:cl_update_vg_odm_ts(0.840):datavg[198] : Here try and figure out what level of LVM is installed +epprd_rg:cl_update_vg_odm_ts(0.841):datavg[200] lslpp -lcqOr bos.rte.lvm +epprd_rg:cl_update_vg_odm_ts(0.844):datavg[200] cut -f3 -d: +epprd_rg:cl_update_vg_odm_ts(0.845):datavg[200] read V R M F +epprd_rg:cl_update_vg_odm_ts(0.846):datavg[200] IFS=. +epprd_rg:cl_update_vg_odm_ts(0.846):datavg[201] VRMF=0702005101 +epprd_rg:cl_update_vg_odm_ts(0.846):datavg[203] (( 7 == 6 && 702005101 >= 601008015 )) +epprd_rg:cl_update_vg_odm_ts(0.846):datavg[204] (( 702005101 >= 701003046 )) +epprd_rg:cl_update_vg_odm_ts(0.846):datavg[207] : LVM at a level in which timestamp update is unnecessary +epprd_rg:cl_update_vg_odm_ts(0.846):datavg[209] return 0 +epprd_rg:clvaryonvg(3.637):datavg[1674] : On successful varyon, clean up any files used to track errors with +epprd_rg:clvaryonvg(3.637):datavg[1675] : this volume group +epprd_rg:clvaryonvg(3.637):datavg[1677] rm -f /usr/es/sbin/cluster/etc/vg/datavg.desc /usr/es/sbin/cluster/etc/vg/datavg.replay /usr/es/sbin/cluster/etc/vg/datavg.perms /usr/es/sbin/cluster/etc/vg/datavg.tstamp /usr/es/sbin/cluster/etc/vg/datavg.fail +epprd_rg:clvaryonvg(3.639):datavg[1680] : Note that a sync has not been done on the volume group at this point. +epprd_rg:clvaryonvg(3.639):datavg[1681] : A sync is kicked off in cl_sync_vgs, once any filesystem mounts are +epprd_rg:clvaryonvg(3.639):datavg[1682] : complete.
A sync at this time would interfere with the mounts +epprd_rg:clvaryonvg(3.639):datavg[1685] return 0 +epprd_rg:cl_activate_vgs(3.724):datavg[vgs_chk:103] ERRMSG=$'cl_set_vg_fence_height[126]: version @(#)10\t1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37\ncl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY)\ncl_set_vg_fence_height[214]: read(datavg, 16)\ncl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)\ncl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0))' +epprd_rg:cl_activate_vgs(3.725):datavg[vgs_chk:104] RC=0 +epprd_rg:cl_activate_vgs(3.725):datavg[vgs_chk:107] (( 0 == 1 || 0 == 20 )) +epprd_rg:cl_activate_vgs(3.725):datavg[vgs_chk:115] : exit status of clvaryonvg -n datavg: 0 +epprd_rg:cl_activate_vgs(3.725):datavg[vgs_chk:117] [[ -n $'cl_set_vg_fence_height[126]: version @(#)10\t1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37\ncl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY)\ncl_set_vg_fence_height[214]: read(datavg, 16)\ncl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)\ncl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0))' ]] +epprd_rg:cl_activate_vgs(3.725):datavg[vgs_chk:117] (( 0 != 1 )) +epprd_rg:cl_activate_vgs(3.725):datavg[vgs_chk:119] cl_echo 286 $'cl_activate_vgs: Successful clvaryonvg of datavg with message cl_set_vg_fence_height[126]: version @(#)10\t1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37\ncl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY)\ncl_set_vg_fence_height[214]: read(datavg, 16)\ncl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)\ncl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0)).' 
cl_activate_vgs datavg 'cl_set_vg_fence_height[126]:' version '@(#)10' 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 'cl_set_vg_fence_height[180]:' 'open(/usr/es/sbin/cluster/etc/vg/datavg.uuid,' 'O_RDONLY)' 'cl_set_vg_fence_height[214]:' 'read(datavg,' '16)' 'cl_set_vg_fence_height[237]:' 'close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)' 'cl_set_vg_fence_height[265]:' 'sfwSetFenceGroup(vg=datavg' uuid=ec2db4422261eae02091227fb9e53c88 height='rw(0))' Jan 28 2023 17:10:39cl_activate_vgs: Successful clvaryonvg of datavg with message cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0)).+epprd_rg:cl_activate_vgs(3.743):datavg[vgs_chk:123] [[ 0 != 0 ]] +epprd_rg:cl_activate_vgs(3.743):datavg[vgs_chk:127] amlog_trace '' 'Activating Volume Group|datavg' +epprd_rg:cl_activate_vgs(3.743):datavg[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_vgs(3.744):datavg[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_vgs(3.769):datavg[amlog_trace:319] cltime +epprd_rg:cl_activate_vgs(3.771):datavg[amlog_trace:319] DATE=2023-01-28T17:10:39.088403 +epprd_rg:cl_activate_vgs(3.771):datavg[amlog_trace:320] echo '|2023-01-28T17:10:39.088403|INFO: Activating Volume Group|datavg' +epprd_rg:cl_activate_vgs(3.771):datavg[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_vgs(3.771):datavg[vgs_chk:132] echo datavg 0 +epprd_rg:cl_activate_vgs(3.771):datavg[vgs_chk:132] 1>> /tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs(3.771):datavg[vgs_chk:133] return 0 +epprd_rg:cl_activate_vgs:datavg[vgs_list:198] unset PS4_LOOP PS4_TIMER +epprd_rg:cl_activate_vgs[304] wait +epprd_rg:cl_activate_vgs[310] ALLNOERRVGS=All_nonerror_volume_groups +epprd_rg:cl_activate_vgs[311] cl_RMupdate resource_up All_nonerror_volume_groups cl_activate_vgs 2023-01-28T17:10:39.112256 2023-01-28T17:10:39.116473 +epprd_rg:cl_activate_vgs[318] [[ -f /tmp/_activate_vgs.tmp ]] +epprd_rg:cl_activate_vgs[320] grep ' 1' /tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs[329] rm -f /tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs[332] exit 0 +epprd_rg:process_resources[process_volume_groups:2584] RC=0 +epprd_rg:process_resources[process_volume_groups:2585] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[process_volume_groups:2598] (( 0 != 0 )) +epprd_rg:process_resources[process_volume_groups:2627] return 0 +epprd_rg:process_resources[process_volume_groups_main:2556] STAT=0 +epprd_rg:process_resources[process_volume_groups_main:2559] return 0 +epprd_rg:process_resources[3572] RC=0 +epprd_rg:process_resources[3573] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T17:10:39.134184 clrgpa +epprd_rg:clRGPA[+55] exit 0 
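
This is the process_resources main loop pattern: clRGPA is asked what to do next, its key=value answer is captured and evaluated straight into the environment (set -a auto-exports everything the eval assigns), and execution dispatches on JOB_TYPE; here the answer is JOB_TYPE=LOGREDO ACTION=ACQUIRE for datavg. A condensed sketch of the loop:

    while true
    do
        set -a                 # auto-export whatever the eval assigns
        eval $(clRGPA)         # e.g. JOB_TYPE=LOGREDO ACTION=ACQUIRE VOLUME_GROUPS="datavg" RESOURCE_GROUPS="epprd_rg "
        set +a
        case $JOB_TYPE in
            LOGREDO) [[ $ACTION == ACQUIRE ]] && logredo_volume_groups ;;
            *) ;;              # the many other job types are elided here
        esac
    done
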
+epprd_rg:process_resources[3329] eval JOB_TYPE=LOGREDO ACTION=ACQUIRE VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' +epprd_rg:process_resources[1] JOB_TYPE=LOGREDO +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] VOLUME_GROUPS=datavg +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ LOGREDO == RELEASE ]] +epprd_rg:process_resources[3360] [[ LOGREDO == ONLINE ]] +epprd_rg:process_resources[3634] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[3635] logredo_volume_groups +epprd_rg:process_resources[logredo_volume_groups:2745] PS4_FUNC=logredo_volume_groups +epprd_rg:process_resources[logredo_volume_groups:2745] typeset PS4_FUNC +epprd_rg:process_resources(4.794)[logredo_volume_groups:2746] PS4_TIMER=true +epprd_rg:process_resources(4.794)[logredo_volume_groups:2746] typeset PS4_TIMER +epprd_rg:process_resources(4.794)[logredo_volume_groups:2747] [[ high == high ]] +epprd_rg:process_resources(4.794)[logredo_volume_groups:2747] set -x +epprd_rg:process_resources(4.794)[logredo_volume_groups:2749] TMP_FILE=/var/hacmp/log/.process_resources_logredo.23593416 +epprd_rg:process_resources(4.794)[logredo_volume_groups:2749] export TMP_FILE +epprd_rg:process_resources(4.794)[logredo_volume_groups:2750] rm -f '/var/hacmp/log/.process_resources_logredo*' +epprd_rg:process_resources(4.797)[logredo_volume_groups:2752] STAT=0 +epprd_rg:process_resources(4.797)[logredo_volume_groups:2755] export GROUPNAME +epprd_rg:process_resources(4.798)[logredo_volume_groups:2757] get_list_head datavg +epprd_rg:process_resources(4.798)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(4.798)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(4.798)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(4.798)[get_list_head:60] set -x +epprd_rg:process_resources(4.799)[get_list_head:61] echo datavg +epprd_rg:process_resources(4.801)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(4.801)[get_list_head:61] IFS=: +epprd_rg:process_resources(4.802)[get_list_head:62] echo datavg +epprd_rg:process_resources(4.803)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(4.801)[logredo_volume_groups:2757] read LIST_OF_VOLUME_GROUPS_FOR_RG +epprd_rg:process_resources(4.806)[logredo_volume_groups:2758] get_list_tail datavg +epprd_rg:process_resources(4.806)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(4.807)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(4.807)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(4.807)[get_list_tail:68] set -x +epprd_rg:process_resources(4.808)[get_list_tail:69] echo datavg +epprd_rg:process_resources(4.811)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(4.811)[get_list_tail:69] IFS=: +epprd_rg:process_resources(4.811)[get_list_tail:70] echo +epprd_rg:process_resources(4.811)[logredo_volume_groups:2758] read VOLUME_GROUPS +epprd_rg:process_resources(4.813)[logredo_volume_groups:2761] : Run logredo on all JFS/JFS2 log devices to assure FS consistency 
+epprd_rg:process_resources(4.813)[logredo_volume_groups:2763] ALL_LVs='' +epprd_rg:process_resources(4.813)[logredo_volume_groups:2764] lv_all='' +epprd_rg:process_resources(4.813)[logredo_volume_groups:2765] mount_fs='' +epprd_rg:process_resources(4.813)[logredo_volume_groups:2766] fsck_check='' +epprd_rg:process_resources(4.813)[logredo_volume_groups:2767] MOUNTGUARD='' +epprd_rg:process_resources(4.813)[logredo_volume_groups:2768] FMMOUNT_OUT='' +epprd_rg:process_resources(4.813)[logredo_volume_groups:2769] FMMOUNT='' +epprd_rg:process_resources(4.815)[logredo_volume_groups:2772] tail +3 +epprd_rg:process_resources(4.814)[logredo_volume_groups:2772] lsvg -lL datavg +epprd_rg:process_resources(4.814)[logredo_volume_groups:2772] LC_ALL=C +epprd_rg:process_resources(4.815)[logredo_volume_groups:2772] 1>> /var/hacmp/log/.process_resources_logredo.23593416 +epprd_rg:process_resources(4.838)[logredo_volume_groups:2774] cat /var/hacmp/log/.process_resources_logredo.23593416 +epprd_rg:process_resources(4.841)[logredo_volume_groups:2774] awk '{print $1}' +epprd_rg:process_resources(4.845)[logredo_volume_groups:2774] ALL_LVs=$'epprdaloglv\nsaplv\nsapmntlv\noraclelv\nepplv\noraarchlv\nsapdata1lv\nsapdata2lv\nsapdata3lv\nsapdata4lv\nboardlv\noriglogAlv\noriglogBlv\nmirrlogAlv\nmirrlogBlv' +epprd_rg:process_resources(4.845)[logredo_volume_groups:2777] : Verify if any of the file systems associated with volume group datavg +epprd_rg:process_resources(4.845)[logredo_volume_groups:2778] : is already mounted anywhere else in the cluster. +epprd_rg:process_resources(4.845)[logredo_volume_groups:2779] : If it is already mounted somewhere else, we don't want to continue +epprd_rg:process_resources(4.845)[logredo_volume_groups:2780] : here to avoid data corruption. +epprd_rg:process_resources(4.847)[logredo_volume_groups:2782] cat /var/hacmp/log/.process_resources_logredo.23593416 +epprd_rg:process_resources(4.850)[logredo_volume_groups:2782] grep -v N/A +epprd_rg:process_resources(4.852)[logredo_volume_groups:2782] awk '{print $1}' +epprd_rg:process_resources(4.857)[logredo_volume_groups:2782] lv_all=$'saplv\nsapmntlv\noraclelv\nepplv\noraarchlv\nsapdata1lv\nsapdata2lv\nsapdata3lv\nsapdata4lv\nboardlv\noriglogAlv\noriglogBlv\nmirrlogAlv\nmirrlogBlv' +epprd_rg:process_resources(4.857)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.857)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.859)[logredo_volume_groups:2789] lsfs -qc saplv +epprd_rg:process_resources(4.859)[logredo_volume_groups:2789] LC_ALL=C lsfs: No record matching '/var/hacmp/saplv' was found in /etc/filesystems. +epprd_rg:process_resources(4.862)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.865)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.867)[logredo_volume_groups:2789] grep -w MountGuard +epprd_rg:process_resources(4.870)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.870)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.870)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node.
+epprd_rg:process_resources(4.870)[logredo_volume_groups:2795] fsdb saplv +epprd_rg:process_resources(4.871)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.876)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.878)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.880)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.882)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.887)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.887)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.887)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.887)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.887)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.889)[logredo_volume_groups:2789] lsfs -qc sapmntlv +epprd_rg:process_resources(4.889)[logredo_volume_groups:2789] LC_ALL=C lsfs: No record matching '/var/hacmp/sapmntlv' was found in /etc/filesystems. +epprd_rg:process_resources(4.892)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.895)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.897)[logredo_volume_groups:2789] grep -w MountGuard +epprd_rg:process_resources(4.900)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.901)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.901)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.901)[logredo_volume_groups:2795] fsdb sapmntlv +epprd_rg:process_resources(4.902)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.905)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.907)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.909)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.911)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.916)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.916)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.916)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.916)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.916)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.918)[logredo_volume_groups:2789] lsfs -qc oraclelv +epprd_rg:process_resources(4.918)[logredo_volume_groups:2789] LC_ALL=C lsfs: No record matching '/var/hacmp/oraclelv' was found in /etc/filesystems. +epprd_rg:process_resources(4.921)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.923)[logredo_volume_groups:2789] grep -w MountGuard +epprd_rg:process_resources(4.924)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.929)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.929)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:process_resources(4.929)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.929)[logredo_volume_groups:2795] fsdb oraclelv +epprd_rg:process_resources(4.930)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.933)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.935)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.937)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.939)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.944)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.944)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.944)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.944)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.944)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.946)[logredo_volume_groups:2789] lsfs -qc epplv +epprd_rg:process_resources(4.946)[logredo_volume_groups:2789] LC_ALL=C lsfs: No record matching '/var/hacmp/epplv' was found in /etc/filesystems. +epprd_rg:process_resources(4.949)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.951)[logredo_volume_groups:2789] grep -w MountGuard +epprd_rg:process_resources(4.953)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.957)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.957)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.957)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.957)[logredo_volume_groups:2795] fsdb epplv +epprd_rg:process_resources(4.958)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.962)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.964)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.966)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.968)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.973)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.973)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.973)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.973)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.973)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.975)[logredo_volume_groups:2789] lsfs -qc oraarchlv +epprd_rg:process_resources(4.975)[logredo_volume_groups:2789] LC_ALL=C lsfs: No record matching '/var/hacmp/oraarchlv' was found in /etc/filesystems. 
+epprd_rg:process_resources(4.978)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.980)[logredo_volume_groups:2789] grep -w MountGuard +epprd_rg:process_resources(4.982)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.986)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.986)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.986)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.986)[logredo_volume_groups:2795] fsdb oraarchlv +epprd_rg:process_resources(4.987)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.991)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.993)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.995)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.997)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.002)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.002)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.002)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.002)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.002)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.003)[logredo_volume_groups:2789] lsfs -qc sapdata1lv +epprd_rg:process_resources(5.004)[logredo_volume_groups:2789] LC_ALL=C lsfs: No record matching '/var/hacmp/sapdata1lv' was found in /etc/filesystems. +epprd_rg:process_resources(5.007)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.009)[logredo_volume_groups:2789] grep -w MountGuard +epprd_rg:process_resources(5.011)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.015)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.015)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.015)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.015)[logredo_volume_groups:2795] fsdb sapdata1lv +epprd_rg:process_resources(5.016)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.019)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.021)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.023)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.024)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.030)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.030)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.030)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.030)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.030)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
+epprd_rg:process_resources(5.032)[logredo_volume_groups:2789] lsfs -qc sapdata2lv +epprd_rg:process_resources(5.032)[logredo_volume_groups:2789] LC_ALL=C lsfs: No record matching '/var/hacmp/sapdata2lv' was found in /etc/filesystems. +epprd_rg:process_resources(5.035)[logredo_volume_groups:2789] grep -w MountGuard +epprd_rg:process_resources(5.037)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.039)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.043)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.043)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.043)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.043)[logredo_volume_groups:2795] fsdb sapdata2lv +epprd_rg:process_resources(5.044)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.047)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.048)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.050)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.052)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.057)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.057)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.057)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.057)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.057)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.059)[logredo_volume_groups:2789] lsfs -qc sapdata3lv +epprd_rg:process_resources(5.060)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.060)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.061)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapdata3lv' was found in /etc/filesystems. +epprd_rg:process_resources(5.062)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.066)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.066)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.066)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
+epprd_rg:process_resources(5.066)[logredo_volume_groups:2795] fsdb sapdata3lv +epprd_rg:process_resources(5.067)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.070)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.072)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.073)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.073)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.078)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.078)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.078)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.078)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.078)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.080)[logredo_volume_groups:2789] lsfs -qc sapdata4lv +epprd_rg:process_resources(5.081)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.081)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.082)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapdata4lv' was found in /etc/filesystems. +epprd_rg:process_resources(5.083)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.087)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.087)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.087)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.087)[logredo_volume_groups:2795] fsdb sapdata4lv +epprd_rg:process_resources(5.088)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.091)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.094)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.094)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.094)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.099)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.099)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.099)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.100)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.100)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.102)[logredo_volume_groups:2789] lsfs -qc boardlv +epprd_rg:process_resources(5.102)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.102)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.103)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/boardlv' was found in /etc/filesystems. +epprd_rg:process_resources(5.104)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.108)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.108)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:process_resources(5.108)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.108)[logredo_volume_groups:2795] fsdb boardlv +epprd_rg:process_resources(5.109)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.112)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.114)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.115)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.115)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.120)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.120)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.120)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.121)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.121)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.123)[logredo_volume_groups:2789] lsfs -qc origlogAlv +epprd_rg:process_resources(5.123)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.123)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.124)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/origlogAlv' was found in /etc/filesystems. +epprd_rg:process_resources(5.125)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.129)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.129)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.129)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.129)[logredo_volume_groups:2795] fsdb origlogAlv +epprd_rg:process_resources(5.130)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.134)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.136)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.136)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.136)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.142)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.142)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.142)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.142)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.142)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.144)[logredo_volume_groups:2789] lsfs -qc origlogBlv +epprd_rg:process_resources(5.144)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.145)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.145)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/origlogBlv' was found in /etc/filesystems. 
+epprd_rg:process_resources(5.146)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.150)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.150)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.150)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.151)[logredo_volume_groups:2795] fsdb origlogBlv +epprd_rg:process_resources(5.152)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.155)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.157)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.157)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.158)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.163)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.163)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.163)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.163)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.163)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.165)[logredo_volume_groups:2789] lsfs -qc mirrlogAlv +epprd_rg:process_resources(5.165)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.166)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.166)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/mirrlogAlv' was found in /etc/filesystems. +epprd_rg:process_resources(5.167)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.172)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.172)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.172)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.172)[logredo_volume_groups:2795] fsdb mirrlogAlv +epprd_rg:process_resources(5.173)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.176)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.178)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.179)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.179)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.184)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.184)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.184)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.184)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.184)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
+epprd_rg:process_resources(5.186)[logredo_volume_groups:2789] lsfs -qc mirrlogBlv +epprd_rg:process_resources(5.186)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.187)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.187)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/mirrlogBlv' was found in /etc/filesystems. +epprd_rg:process_resources(5.189)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.193)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.193)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.193)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.193)[logredo_volume_groups:2795] fsdb mirrlogBlv +epprd_rg:process_resources(5.194)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.197)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.199)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.200)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.200)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.205)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.205)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.205)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.205)[logredo_volume_groups:2814] comm_failure='' +epprd_rg:process_resources(5.205)[logredo_volume_groups:2815] rc_mount='' +epprd_rg:process_resources(5.205)[logredo_volume_groups:2816] [[ -n '' ]] +epprd_rg:process_resources(5.205)[logredo_volume_groups:2851] logdevs='' +epprd_rg:process_resources(5.205)[logredo_volume_groups:2852] HAVE_GEO='' +epprd_rg:process_resources(5.205)[logredo_volume_groups:2853] lslpp -l 'hageo.*' +epprd_rg:process_resources(5.206)[logredo_volume_groups:2853] 1> /dev/null 2>& 1 +epprd_rg:process_resources(5.209)[logredo_volume_groups:2854] lslpp -l 'geoRM.*' +epprd_rg:process_resources(5.210)[logredo_volume_groups:2854] 1> /dev/null 2>& 1 +epprd_rg:process_resources(5.213)[logredo_volume_groups:2874] pattern='jfs*log' +epprd_rg:process_resources(5.213)[logredo_volume_groups:2876] : Any device with the type as log should be added +epprd_rg:process_resources(5.213)[logredo_volume_groups:2882] odmget -q $'name = epprdaloglv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.217)[logredo_volume_groups:2882] [[ -n $'\nCuAt:\n\tname = "epprdaloglv"\n\tattribute = "type"\n\tvalue = "jfs2log"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.217)[logredo_volume_groups:2884] logdevs=' /dev/epprdaloglv' +epprd_rg:process_resources(5.217)[logredo_volume_groups:2882] odmget -q $'name = saplv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.220)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.220)[logredo_volume_groups:2882] odmget -q $'name = sapmntlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.224)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.224)[logredo_volume_groups:2882] odmget -q $'name = oraclelv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.227)[logredo_volume_groups:2882] [[ -n '' ]]
+epprd_rg:process_resources(5.227)[logredo_volume_groups:2882] odmget -q $'name = epplv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.231)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.231)[logredo_volume_groups:2882] odmget -q $'name = oraarchlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.235)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.235)[logredo_volume_groups:2882] odmget -q $'name = sapdata1lv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.238)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.238)[logredo_volume_groups:2882] odmget -q $'name = sapdata2lv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.242)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.242)[logredo_volume_groups:2882] odmget -q $'name = sapdata3lv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.245)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.245)[logredo_volume_groups:2882] odmget -q $'name = sapdata4lv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.249)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.249)[logredo_volume_groups:2882] odmget -q $'name = boardlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.252)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.252)[logredo_volume_groups:2882] odmget -q $'name = origlogAlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.256)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.256)[logredo_volume_groups:2882] odmget -q $'name = origlogBlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.259)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.260)[logredo_volume_groups:2882] odmget -q $'name = mirrlogAlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.263)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.263)[logredo_volume_groups:2882] odmget -q $'name = mirrlogBlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.267)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.267)[logredo_volume_groups:2889] : JFS2 file systems can have inline logs where the log LV is the same as the FS LV.
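Next the function builds logdevs, the list of log devices to replay. Dedicated JFS/JFS2 log LVs are found by their ODM type attribute (the jfs*log pattern matched above), and, as the comment notes, a JFS2 file system may instead use an inline log, which the stanza in /etc/filesystems records as log = INLINE (or as the data LV itself). A condensed sketch of the per-LV decision; lv_is_dedicated_log and fs_log_of are hypothetical helper names, and the real function also confirms the LV type and label via odmget first:

    lv_is_dedicated_log () {
        # True when the CuAt type attribute is jfslog/jfs2log.
        [[ -n $(odmget -q "name = $1 and attribute = type and value like jfs*log" CuAt) ]]
    }

    fs_log_of () {
        # AIX grep -p prints the whole /etc/filesystems paragraph for the device;
        # awk then extracts the value of its "log = ..." line.
        grep -wp "/dev/$1" /etc/filesystems | awk '$1 ~ /log/ {printf $3}'
    }

    for lv in $ALL_LVs; do
        if lv_is_dedicated_log "$lv"; then
            logdevs="$logdevs /dev/$lv"
        else
            LOG=$(fs_log_of "$lv")
            # An inline log lives inside the data LV, so the LV itself is replayed.
            [[ $LOG == INLINE || $LOG == /dev/$lv ]] && logdevs="$logdevs /dev/$lv"
        fi
    done

In the loop that follows, every data LV reports /dev/epprdaloglv as its log, so the shared dedicated log is the only device collected.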
+epprd_rg:process_resources(5.267)[logredo_volume_groups:2895] odmget $'-qname = epprdaloglv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.270)[logredo_volume_groups:2895] [[ -n '' ]] +epprd_rg:process_resources(5.270)[logredo_volume_groups:2895] odmget $'-qname = saplv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.274)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "saplv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.276)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.276)[logredo_volume_groups:2898] odmget -q 'name = saplv and attribute = label' CuAt +epprd_rg:process_resources(5.280)[logredo_volume_groups:2898] [[ -n /usr/sap ]] +epprd_rg:process_resources(5.282)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.282)[logredo_volume_groups:2900] grep -wp /dev/saplv /etc/filesystems +epprd_rg:process_resources(5.287)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.287)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.287)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/saplv ]] +epprd_rg:process_resources(5.287)[logredo_volume_groups:2895] odmget $'-qname = sapmntlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.291)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapmntlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.293)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.293)[logredo_volume_groups:2898] odmget -q 'name = sapmntlv and attribute = label' CuAt +epprd_rg:process_resources(5.297)[logredo_volume_groups:2898] [[ -n /sapmnt ]] +epprd_rg:process_resources(5.299)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.299)[logredo_volume_groups:2900] grep -wp /dev/sapmntlv /etc/filesystems +epprd_rg:process_resources(5.305)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.305)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.305)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapmntlv ]] +epprd_rg:process_resources(5.305)[logredo_volume_groups:2895] odmget $'-qname = oraclelv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.308)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "oraclelv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.310)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.310)[logredo_volume_groups:2898] odmget -q 'name = oraclelv and attribute = label' CuAt +epprd_rg:process_resources(5.315)[logredo_volume_groups:2898] [[ -n /oracle ]] +epprd_rg:process_resources(5.317)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.317)[logredo_volume_groups:2900] grep -wp /dev/oraclelv /etc/filesystems +epprd_rg:process_resources(5.322)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.322)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] 
+epprd_rg:process_resources(5.322)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/oraclelv ]] +epprd_rg:process_resources(5.322)[logredo_volume_groups:2895] odmget $'-qname = epplv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.326)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "epplv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.328)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.328)[logredo_volume_groups:2898] odmget -q 'name = epplv and attribute = label' CuAt +epprd_rg:process_resources(5.332)[logredo_volume_groups:2898] [[ -n /oracle/EPP ]] +epprd_rg:process_resources(5.334)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.334)[logredo_volume_groups:2900] grep -wp /dev/epplv /etc/filesystems +epprd_rg:process_resources(5.339)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.340)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.340)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/epplv ]] +epprd_rg:process_resources(5.340)[logredo_volume_groups:2895] odmget $'-qname = oraarchlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.343)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "oraarchlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.345)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.345)[logredo_volume_groups:2898] odmget -q 'name = oraarchlv and attribute = label' CuAt +epprd_rg:process_resources(5.350)[logredo_volume_groups:2898] [[ -n /oracle/EPP/oraarch ]] +epprd_rg:process_resources(5.352)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.352)[logredo_volume_groups:2900] grep -wp /dev/oraarchlv /etc/filesystems +epprd_rg:process_resources(5.357)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.357)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.357)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/oraarchlv ]] +epprd_rg:process_resources(5.357)[logredo_volume_groups:2895] odmget $'-qname = sapdata1lv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.361)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapdata1lv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.363)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.363)[logredo_volume_groups:2898] odmget -q 'name = sapdata1lv and attribute = label' CuAt +epprd_rg:process_resources(5.367)[logredo_volume_groups:2898] [[ -n /oracle/EPP/sapdata1 ]] +epprd_rg:process_resources(5.369)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.369)[logredo_volume_groups:2900] grep -wp /dev/sapdata1lv /etc/filesystems +epprd_rg:process_resources(5.374)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.374)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.374)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapdata1lv ]] 
+epprd_rg:process_resources(5.374)[logredo_volume_groups:2895] odmget $'-qname = sapdata2lv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.378)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapdata2lv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.380)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.380)[logredo_volume_groups:2898] odmget -q 'name = sapdata2lv and attribute = label' CuAt +epprd_rg:process_resources(5.384)[logredo_volume_groups:2898] [[ -n /oracle/EPP/sapdata2 ]] +epprd_rg:process_resources(5.387)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.387)[logredo_volume_groups:2900] grep -wp /dev/sapdata2lv /etc/filesystems +epprd_rg:process_resources(5.392)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.392)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.392)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapdata2lv ]] +epprd_rg:process_resources(5.392)[logredo_volume_groups:2895] odmget $'-qname = sapdata3lv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.396)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapdata3lv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.397)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.398)[logredo_volume_groups:2898] odmget -q 'name = sapdata3lv and attribute = label' CuAt +epprd_rg:process_resources(5.402)[logredo_volume_groups:2898] [[ -n /oracle/EPP/sapdata3 ]] +epprd_rg:process_resources(5.404)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.404)[logredo_volume_groups:2900] grep -wp /dev/sapdata3lv /etc/filesystems +epprd_rg:process_resources(5.409)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.409)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.409)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapdata3lv ]] +epprd_rg:process_resources(5.409)[logredo_volume_groups:2895] odmget $'-qname = sapdata4lv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.413)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapdata4lv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.415)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.415)[logredo_volume_groups:2898] odmget -q 'name = sapdata4lv and attribute = label' CuAt +epprd_rg:process_resources(5.419)[logredo_volume_groups:2898] [[ -n /oracle/EPP/sapdata4 ]] +epprd_rg:process_resources(5.421)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.421)[logredo_volume_groups:2900] grep -wp /dev/sapdata4lv /etc/filesystems +epprd_rg:process_resources(5.427)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.427)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.427)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapdata4lv ]] +epprd_rg:process_resources(5.427)[logredo_volume_groups:2895] odmget $'-qname = boardlv and \t\t attribute = type and \t\t value = jfs2' CuAt
+epprd_rg:process_resources(5.430)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "boardlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.432)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.432)[logredo_volume_groups:2898] odmget -q 'name = boardlv and attribute = label' CuAt +epprd_rg:process_resources(5.437)[logredo_volume_groups:2898] [[ -n /board_org ]] +epprd_rg:process_resources(5.439)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.439)[logredo_volume_groups:2900] grep -wp /dev/boardlv /etc/filesystems +epprd_rg:process_resources(5.444)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.444)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.444)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/boardlv ]] +epprd_rg:process_resources(5.444)[logredo_volume_groups:2895] odmget $'-qname = origlogAlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.448)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "origlogAlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.450)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.450)[logredo_volume_groups:2898] odmget -q 'name = origlogAlv and attribute = label' CuAt +epprd_rg:process_resources(5.454)[logredo_volume_groups:2898] [[ -n /oracle/EPP/origlogA ]] +epprd_rg:process_resources(5.456)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.457)[logredo_volume_groups:2900] grep -wp /dev/origlogAlv /etc/filesystems +epprd_rg:process_resources(5.462)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.462)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.462)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/origlogAlv ]] +epprd_rg:process_resources(5.462)[logredo_volume_groups:2895] odmget $'-qname = origlogBlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.466)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "origlogBlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.468)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.468)[logredo_volume_groups:2898] odmget -q 'name = origlogBlv and attribute = label' CuAt +epprd_rg:process_resources(5.472)[logredo_volume_groups:2898] [[ -n /oracle/EPP/origlogB ]] +epprd_rg:process_resources(5.474)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.474)[logredo_volume_groups:2900] grep -wp /dev/origlogBlv /etc/filesystems +epprd_rg:process_resources(5.479)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.479)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.479)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/origlogBlv ]] +epprd_rg:process_resources(5.479)[logredo_volume_groups:2895] odmget $'-qname = mirrlogAlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.483)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "mirrlogAlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]]
+epprd_rg:process_resources(5.485)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.485)[logredo_volume_groups:2898] odmget -q 'name = mirrlogAlv and attribute = label' CuAt +epprd_rg:process_resources(5.490)[logredo_volume_groups:2898] [[ -n /oracle/EPP/mirrlogA ]] +epprd_rg:process_resources(5.492)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.492)[logredo_volume_groups:2900] grep -wp /dev/mirrlogAlv /etc/filesystems +epprd_rg:process_resources(5.497)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.497)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.497)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/mirrlogAlv ]] +epprd_rg:process_resources(5.497)[logredo_volume_groups:2895] odmget $'-qname = mirrlogBlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.501)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "mirrlogBlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.503)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.503)[logredo_volume_groups:2898] odmget -q 'name = mirrlogBlv and attribute = label' CuAt +epprd_rg:process_resources(5.507)[logredo_volume_groups:2898] [[ -n /oracle/EPP/mirrlogB ]] +epprd_rg:process_resources(5.509)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.509)[logredo_volume_groups:2900] grep -wp /dev/mirrlogBlv /etc/filesystems +epprd_rg:process_resources(5.515)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.515)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.515)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/mirrlogBlv ]] +epprd_rg:process_resources(5.515)[logredo_volume_groups:2910] : Remove any duplicates acquired so far +epprd_rg:process_resources(5.517)[logredo_volume_groups:2912] echo /dev/epprdaloglv +epprd_rg:process_resources(5.517)[logredo_volume_groups:2912] sort -u +epprd_rg:process_resources(5.518)[logredo_volume_groups:2912] tr ' ' '\n' +epprd_rg:process_resources(5.524)[logredo_volume_groups:2912] logdevs=/dev/epprdaloglv +epprd_rg:process_resources(5.524)[logredo_volume_groups:2915] : Run logredos in parallel to save time. +epprd_rg:process_resources(5.524)[logredo_volume_groups:2919] [[ -n '' ]] +epprd_rg:process_resources(5.524)[logredo_volume_groups:2944] : Run logredo only if the LV is closed. +epprd_rg:process_resources(5.524)[logredo_volume_groups:2946] awk '$1 ~ /^epprdaloglv$/ && $6 ~ /closed\// {print "CLOSED"}' /var/hacmp/log/.process_resources_logredo.23593416 +epprd_rg:process_resources(5.528)[logredo_volume_groups:2946] [[ -n CLOSED ]] +epprd_rg:process_resources(5.528)[logredo_volume_groups:2949] : Run logredo only if the filesystem is not mounted on any of the nodes in the cluster.
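With logdevs reduced to the single shared log /dev/epprdaloglv, the remaining guards and the replay itself follow on the next lines: duplicates are removed, logredo runs only for LVs that the saved lsvg -l output reported as closed (field 6 looks like closed/syncd), each replay is pushed into the background, and a final wait reaps them, which is why the rm of the temporary file is traced before logredo's own J2_LOGREDO message. A condensed sketch under those assumptions, reusing the TMP_FILE captured earlier in the trace:

    # Collapse duplicates: several file systems usually share one log LV.
    logdevs=$(echo $logdevs | tr ' ' '\n' | sort -u)

    for dev in $logdevs; do
        lv=${dev#/dev/}
        # TMP_FILE holds the saved "lsvg -lL <vg>" output; field 6 is the LV state.
        if [[ -n $(awk -v lv="$lv" '$1 == lv && $6 ~ /closed\// {print "CLOSED"}' "$TMP_FILE") ]]
        then
            logredo "$dev" &    # replay the JFS2 log in the background
        fi
    done
    wait                        # let all background logredos finish

Running the replays in parallel matters when a resource group spans many volume groups; here there is only one log device, so the wait returns almost immediately.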
+epprd_rg:process_resources(5.528)[logredo_volume_groups:2951] [[ -z '' ]] +epprd_rg:process_resources(5.529)[logredo_volume_groups:2958] rm -f /var/hacmp/log/.process_resources_logredo.23593416 +epprd_rg:process_resources(5.529)[logredo_volume_groups:2953] logredo /dev/epprdaloglv +epprd_rg:process_resources(5.533)[logredo_volume_groups:2962] : Wait for the background logredos from the RGs +epprd_rg:process_resources(5.533)[logredo_volume_groups:2964] wait J2_LOGREDO:log redo processing for /dev/epprdaloglv +epprd_rg:process_resources(5.565)[logredo_volume_groups:2966] return 0 +epprd_rg:process_resources(5.565)[3324] true +epprd_rg:process_resources(5.565)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(5.565)[3328] set -a +epprd_rg:process_resources(5.565)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T17:10:39.924466 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(5.584)[3329] eval JOB_TYPE=FILESYSTEMS ACTION=ACQUIRE FILE_SYSTEMS='"/board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap"' RESOURCE_GROUPS='"epprd_rg' '"' FSCHECK_TOOLS='"fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck"' RECOVERY_METHODS='"sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential"' +epprd_rg:process_resources(5.584)[1] JOB_TYPE=FILESYSTEMS +epprd_rg:process_resources(5.584)[1] ACTION=ACQUIRE +epprd_rg:process_resources(5.584)[1] FILE_SYSTEMS=/board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap +epprd_rg:process_resources(5.584)[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources(5.584)[1] FSCHECK_TOOLS=fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck +epprd_rg:process_resources(5.584)[1] RECOVERY_METHODS=sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:process_resources(5.584)[3330] RC=0 +epprd_rg:process_resources(5.584)[3331] set +a +epprd_rg:process_resources(5.584)[3333] (( 0 != 0 )) +epprd_rg:process_resources(5.584)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(5.584)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(5.584)[3343] export GROUPNAME +epprd_rg:process_resources(5.584)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(5.584)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(5.584)[3360] [[ FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources(5.584)[3360] [[ FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources(5.584)[3482] process_file_systems ACQUIRE +epprd_rg:process_resources(5.584)[process_file_systems:2640] PS4_FUNC=process_file_systems +epprd_rg:process_resources(5.584)[process_file_systems:2640] typeset PS4_FUNC +epprd_rg:process_resources(5.584)[process_file_systems:2641] [[ high == high ]] +epprd_rg:process_resources(5.584)[process_file_systems:2641] set -x 
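The eval JOB_TYPE=FILESYSTEMS ... line above is the same dispatcher handshake seen earlier for the LOGREDO job: clRGPA prints shell assignments describing the next unit of work, process_resources evals them with allexport enabled, and then branches on JOB_TYPE. A hypothetical condensation of that loop (the real script handles many more job types):

    while true; do
        set -a                      # auto-export everything the eval assigns
        eval $(clRGPA)              # e.g. JOB_TYPE=FILESYSTEMS ACTION=ACQUIRE ...
        RC=$?
        set +a
        (( RC != 0 )) && break      # clRGPA failure ends the loop

        case $JOB_TYPE in
            LOGREDO)     logredo_volume_groups ;;
            FILESYSTEMS) process_file_systems $ACTION ;;
            NONE)        break ;;   # assumption: signals no more work queued
            *)           : ;;       # ...many more job types in the real script
        esac
    done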
+epprd_rg:process_resources(5.584)[process_file_systems:2643] STAT=0 +epprd_rg:process_resources(5.584)[process_file_systems:2645] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources(5.584)[process_file_systems:2647] cl_activate_fs +epprd_rg:cl_activate_fs[819] version=1.1.8.5 +epprd_rg:cl_activate_fs[823] : Check for mounting OEM file systems +epprd_rg:cl_activate_fs[825] OEM_FS=false +epprd_rg:cl_activate_fs[826] (( 0 != 0 )) +epprd_rg:cl_activate_fs[832] STATUS=0 +epprd_rg:cl_activate_fs[832] typeset -li STATUS +epprd_rg:cl_activate_fs[833] EMULATE=REAL +epprd_rg:cl_activate_fs[836] : The environment variable MOUNT_WLMCNTRL_SELFMANAGE is referred to inside mount. +epprd_rg:cl_activate_fs[837] : If this variable is set, a few calls to wlmcntrl are skipped inside mount, which +epprd_rg:cl_activate_fs[838] : offers performance benefits. Hence we will export this variable if it is set +epprd_rg:cl_activate_fs[839] : in /etc/environment. +epprd_rg:cl_activate_fs[841] grep -w ^MOUNT_WLMCNTRL_SELFMANAGE /etc/environment +epprd_rg:cl_activate_fs[841] export eval +epprd_rg:cl_activate_fs[843] [[ -n FILESYSTEMS ]] +epprd_rg:cl_activate_fs[843] [[ FILESYSTEMS != GROUP ]] +epprd_rg:cl_activate_fs[846] : If JOB_TYPE is set, and it does not equal GROUP, then +epprd_rg:cl_activate_fs[847] : we are processing for process_resources, which passes requests +epprd_rg:cl_activate_fs[848] : associated with multiple resource groups through environment variables +epprd_rg:cl_activate_fs[850] activate_fs_process_resources +epprd_rg:cl_activate_fs[activate_fs_process_resources:716] [[ high == high ]] +epprd_rg:cl_activate_fs[activate_fs_process_resources:716] set -x +epprd_rg:cl_activate_fs[activate_fs_process_resources:718] ERRSTATUS=0 +epprd_rg:cl_activate_fs[activate_fs_process_resources:718] typeset -i ERRSTATUS +epprd_rg:cl_activate_fs[activate_fs_process_resources:719] RC=0 +epprd_rg:cl_activate_fs[activate_fs_process_resources:719] typeset -li RC +epprd_rg:cl_activate_fs[activate_fs_process_resources:742] export GROUPNAME +epprd_rg:cl_activate_fs[activate_fs_process_resources:745] : Get the file systems, recovery tool and procedure for this +epprd_rg:cl_activate_fs[activate_fs_process_resources:746] : resource group +epprd_rg:cl_activate_fs[activate_fs_process_resources:748] print /board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap +epprd_rg:cl_activate_fs[activate_fs_process_resources:748] read _RG_FILE_SYSTEMS FILE_SYSTEMS +epprd_rg:cl_activate_fs[activate_fs_process_resources:748] IFS=: +epprd_rg:cl_activate_fs[activate_fs_process_resources:749] print fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck +epprd_rg:cl_activate_fs[activate_fs_process_resources:749] read _RG_FSCHECK_TOOLS FSCHECK_TOOLS +epprd_rg:cl_activate_fs[activate_fs_process_resources:749] IFS=: +epprd_rg:cl_activate_fs[activate_fs_process_resources:750] print sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:cl_activate_fs[activate_fs_process_resources:750] read _RG_RECOVERY_METHODS RECOVERY_METHODS +epprd_rg:cl_activate_fs[activate_fs_process_resources:750] IFS=: +epprd_rg:cl_activate_fs[activate_fs_process_resources:753] : Since all file systems in a resource group use the same recovery
+epprd_rg:cl_activate_fs[activate_fs_process_resources:754] : method and recovery means, just pick up the first one in the list +epprd_rg:cl_activate_fs[activate_fs_process_resources:756] print fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck +epprd_rg:cl_activate_fs[activate_fs_process_resources:756] read FSCHECK_TOOL rest +epprd_rg:cl_activate_fs[activate_fs_process_resources:756] IFS=, +epprd_rg:cl_activate_fs[activate_fs_process_resources:757] print sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:cl_activate_fs[activate_fs_process_resources:757] read RECOVERY_METHOD rest +epprd_rg:cl_activate_fs[activate_fs_process_resources:757] IFS=, +epprd_rg:cl_activate_fs[activate_fs_process_resources:760] : If there are any unmounted file systems for this resource group, go +epprd_rg:cl_activate_fs[activate_fs_process_resources:761] : recover and mount them. +epprd_rg:cl_activate_fs[activate_fs_process_resources:763] [[ -n /board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap ]] +epprd_rg:cl_activate_fs[activate_fs_process_resources:765] IFS=, +epprd_rg:cl_activate_fs[activate_fs_process_resources:765] set -- /board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap +epprd_rg:cl_activate_fs[activate_fs_process_resources:765] print /board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap +epprd_rg:cl_activate_fs[activate_fs_process_resources:765] RG_FILE_SYSTEMS='/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[activate_fs_process_resources:766] activate_fs_process_group sequential fsck '/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[activate_fs_process_group:362] PS4_LOOP='' +epprd_rg:cl_activate_fs[activate_fs_process_group:362] typeset PS4_LOOP +epprd_rg:cl_activate_fs[activate_fs_process_group:363] [[ high == high ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:363] set -x +epprd_rg:cl_activate_fs[activate_fs_process_group:365] typeset RECOVERY_METHOD FSCHECK_TOOL FILESYSTEMS +epprd_rg:cl_activate_fs[activate_fs_process_group:366] STATUS=0 +epprd_rg:cl_activate_fs[activate_fs_process_group:366] typeset -i STATUS +epprd_rg:cl_activate_fs[activate_fs_process_group:368] RECOVERY_METHOD=sequential +epprd_rg:cl_activate_fs[activate_fs_process_group:369] FSCHECK_TOOL=fsck +epprd_rg:cl_activate_fs[activate_fs_process_group:370] shift 2 +epprd_rg:cl_activate_fs[activate_fs_process_group:371] FILESYSTEMS='/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap'
+epprd_rg:cl_activate_fs[activate_fs_process_group:372] comm_failure='' +epprd_rg:cl_activate_fs[activate_fs_process_group:372] typeset comm_failure +epprd_rg:cl_activate_fs[activate_fs_process_group:373] rc_mount='' +epprd_rg:cl_activate_fs[activate_fs_process_group:373] typeset rc_mount +epprd_rg:cl_activate_fs[activate_fs_process_group:376] : Filter out duplicates, and file systems which are already mounted +epprd_rg:cl_activate_fs[activate_fs_process_group:378] mounts_to_do '/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[mounts_to_do:283] tomount='/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[mounts_to_do:283] typeset tomount +epprd_rg:cl_activate_fs[mounts_to_do:286] : Get most current list of mounted filesystems +epprd_rg:cl_activate_fs[mounts_to_do:288] mount +epprd_rg:cl_activate_fs[mounts_to_do:288] 2> /dev/null +epprd_rg:cl_activate_fs[mounts_to_do:288] paste -s - +epprd_rg:cl_activate_fs[mounts_to_do:288] awk '$3 ~ /jfs2*$/ {print $2}' +epprd_rg:cl_activate_fs[mounts_to_do:288] mounted=$'/\t/usr\t/var\t/tmp\t/home\t/admin\t/opt\t/var/adm/ras/livedump\t/ptf' +epprd_rg:cl_activate_fs[mounts_to_do:288] typeset mounted +epprd_rg:cl_activate_fs[mounts_to_do:291] shift +epprd_rg:cl_activate_fs[mounts_to_do:294] typeset -A mountedArray tomountArray +epprd_rg:cl_activate_fs[mounts_to_do:295] typeset fs +epprd_rg:cl_activate_fs[mounts_to_do:298] : Create an associative array for each list, which +epprd_rg:cl_activate_fs[mounts_to_do:299] : has the side effect of dropping any duplicates +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/usr]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/var]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/tmp]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/home]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/admin]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/opt]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/var/adm/ras/livedump]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/ptf]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/board_org]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/mirrlogA]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/mirrlogB]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/oraarch]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/origlogA]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/origlogB]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/sapdata1]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/sapdata2]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/sapdata3]=1
+epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/sapdata4]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/sapmnt]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/usr/sap]=1 +epprd_rg:cl_activate_fs[mounts_to_do:310] mounted='' +epprd_rg:cl_activate_fs[mounts_to_do:311] tomount='' +epprd_rg:cl_activate_fs[mounts_to_do:314] : expand fs from all tomountArray subscript names +epprd_rg:cl_activate_fs[mounts_to_do:316] set +u +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:329] : Print all subscript names which are all remaining mount +epprd_rg:cl_activate_fs[mounts_to_do:330] : points which have to be mounted +epprd_rg:cl_activate_fs[mounts_to_do:332] print /board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap +epprd_rg:cl_activate_fs[mounts_to_do:332] sort -u +epprd_rg:cl_activate_fs[mounts_to_do:332] tr ' ' '\n' +epprd_rg:cl_activate_fs[mounts_to_do:334] set -u +epprd_rg:cl_activate_fs[activate_fs_process_group:378] FILESYSTEMS=$'/board_org\n/oracle\n/oracle/EPP\n/oracle/EPP/mirrlogA\n/oracle/EPP/mirrlogB\n/oracle/EPP/oraarch\n/oracle/EPP/origlogA\n/oracle/EPP/origlogB\n/oracle/EPP/sapdata1\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata4\n/sapmnt\n/usr/sap' +epprd_rg:cl_activate_fs[activate_fs_process_group:379] [[ -z $'/board_org\n/oracle\n/oracle/EPP\n/oracle/EPP/mirrlogA\n/oracle/EPP/mirrlogB\n/oracle/EPP/oraarch\n/oracle/EPP/origlogA\n/oracle/EPP/origlogB\n/oracle/EPP/sapdata1\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata4\n/sapmnt\n/usr/sap' ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:385] : Get unique temporary file names by using the resource group and the +epprd_rg:cl_activate_fs[activate_fs_process_group:386] : current process ID +epprd_rg:cl_activate_fs[activate_fs_process_group:388] [[ -z epprd_rg ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:397] TMP_FILENAME=epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs[activate_fs_process_group:398] rm -f /tmp/epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs[activate_fs_process_group:401] : If FSCHECK_TOOL is null get from ODM +epprd_rg:cl_activate_fs[activate_fs_process_group:403] [[ -z fsck ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:408] print fsck +epprd_rg:cl_activate_fs[activate_fs_process_group:408] FSCHECK_TOOL=fsck +epprd_rg:cl_activate_fs[activate_fs_process_group:409] [[ fsck != fsck ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:416] : If RECOVERY_METHOD is null get from ODM +epprd_rg:cl_activate_fs[activate_fs_process_group:418] [[ -z sequential ]] 
+epprd_rg:cl_activate_fs[activate_fs_process_group:423] print sequential +epprd_rg:cl_activate_fs[activate_fs_process_group:423] RECOVERY_METHOD=sequential +epprd_rg:cl_activate_fs[activate_fs_process_group:424] [[ sequential != sequential ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:431] set -u +epprd_rg:cl_activate_fs[activate_fs_process_group:434] : If FSCHECK_TOOL is set to logredo, the logredo for each jfslog has +epprd_rg:cl_activate_fs[activate_fs_process_group:435] : already been done in get_disk_vg_fs, so we only need to do fsck check +epprd_rg:cl_activate_fs[activate_fs_process_group:436] : and recovery here before going on to do the mounts +epprd_rg:cl_activate_fs[activate_fs_process_group:438] [[ fsck == fsck ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:441] TOOL='/usr/sbin/fsck -f -p -o nologredo' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:445] PS4_LOOP=/board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:446] lsfs /board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:446] grep -w /board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:449] : Verify if any of the file system /board_org is already mounted anywhere +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] lsfs -qc /board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] MOUNTGUARD='no)' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
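
The fsdb step the comments introduce is driven entirely from stdin: a here-document feeds the debugger just two subcommands, su (display the superblock) and q (quit), and :467 then scans the first column of the captured dump for the FM_MOUNT state flag. In this trace every superblock shows FM_CLEAN instead, so FMMOUNT stays empty and activation proceeds. Reduced to a standalone sketch (the mount point is illustrative; piping the subcommands in is equivalent to the trace's 0<< \EOF here-document):

    # Dump the JFS2 superblock non-interactively, then look for the
    # FM_MOUNT state flag in column one of the output.
    FMMOUNT_OUT=$(print -- 'su\nq' | fsdb /example/fs)
    FMMOUNT=$(print -- "$FMMOUNT_OUT" | awk '{ print $1 }' | grep -w FM_MOUNT)
    if [[ -n $FMMOUNT ]]
    then
        print "superblock reports FM_MOUNT: mounted, or not cleanly unmounted, elsewhere"
    fi
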
+epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:463] fsdb /board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/board_org\n\nFile System Size:\t\t10485032\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t16384\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000009ffd28\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t91\n[10] s_agsize:\t\t0x00004000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x0013ffa5\n \t\t s_fsckpxd.address:\t1310629\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'boardl\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5832\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000000b5\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t181\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5835\t[52] last unmounted:\t0x63d4d331\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/board_org\n\nFile System Size:\t\t10485032\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t16384\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000009ffd28\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t91\n[10] s_agsize:\t\t0x00004000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x0013ffa5\n \t\t s_fsckpxd.address:\t1310629\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'boardl\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5832\t[33] s_time.tj_nsec:\t0x00000000\n[14] 
s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000000b5\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t181\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5835\t[52] last unmounted:\t0x63d4d331\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:469] [[ 'no)' == yes ]] +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/boardlv The current volume is: /dev/boardlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:445] PS4_LOOP=/oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:446] lsfs /oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:446] grep -w /oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:449] : Verify if any of the file system /oracle is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] lsfs -qc /oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] MOUNTGUARD='no)' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
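
Ahead of each fsdb probe, :457 asks lsfs -qc whether MountGuard is active: the colon-separated output is split with tr, the MountGuard attribute is isolated, and the second space-delimited field is kept. Note that the value captured here is the literal string 'no)' rather than a bare 'no' — a closing parenthesis from the lsfs output rides along — which is harmless because :469 only ever compares it against 'yes'. In isolation (mount point illustrative; the pipeline order is reconstructed from the interleaved trace):

    # Probe whether MountGuard protects this filesystem against
    # concurrent mounting across the cluster.
    fs=/example/fs
    MOUNTGUARD=$(LC_ALL=C lsfs -qc $fs | tr : '\n' | grep -w MountGuard | cut '-d ' -f2)
    if [[ $MOUNTGUARD == yes ]]
    then
        print "$fs is guarded against concurrent mounts"
    fi
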
+epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:463] fsdb /oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle\n\nFile System Size:\t\t41941352\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t65536\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000027ff968\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t211\n[10] s_agsize:\t\t0x00010000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x004fff2d\n \t\t s_fsckpxd.address:\t5242669\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'oracle\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5819\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000295\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t661\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d581e\t[52] last unmounted:\t0x63d4d3ee\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle\n\nFile System Size:\t\t41941352\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t65536\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000027ff968\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t211\n[10] s_agsize:\t\t0x00010000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x004fff2d\n \t\t s_fsckpxd.address:\t5242669\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'oracle\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5819\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] 
s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000295\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t661\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d581e\t[52] last unmounted:\t0x63d4d3ee\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:469] [[ 'no)' == yes ]] +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/oraclelv The current volume is: /dev/oraclelv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:446] lsfs /oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:446] grep -w /oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] lsfs -qc /oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] MOUNTGUARD='no)' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
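
Stepping back: the list this loop walks was prefiltered by mounts_to_do, traced earlier, which loads both the mounted set (mount output filtered through awk for jfs/jfs2 entries) and the requested set into ksh93 associative arrays. Assigning the same subscript twice is a no-op, so duplicates vanish, and any request already present in the mounted array is dropped before the survivors are printed through sort -u. The same technique standalone (filesystem names illustrative, not this cluster's configuration):

    # ksh93 runs the last pipeline stage in the current shell, so the
    # array populated inside the while loop survives it.
    typeset -A mountedArray tomountArray
    mount 2>/dev/null | awk '$3 ~ /jfs2*$/ {print $2}' | while read fs
    do
        mountedArray[$fs]=1
    done
    # Load the request list; the repeated subscript is silently collapsed.
    for fs in /example/a /example/b /example/a
    do
        tomountArray[$fs]=1
    done
    # Drop anything already mounted, print the rest one per line.
    for fs in "${!tomountArray[@]}"
    do
        [[ ${mountedArray[$fs]-} == 1 ]] && unset "tomountArray[$fs]"
    done
    print -- "${!tomountArray[@]}" | tr ' ' '\n' | sort -u
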
+epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:463] fsdb /oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP\n\nFile System Size:\t\t62912232\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t65536\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x0000000003bff6e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t291\n[10] s_agsize:\t\t0x00010000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x0077fedd\n \t\t s_fsckpxd.address:\t7864029\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'epplv\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5824\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000003d5\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t981\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5826\t[52] last unmounted:\t0x63d4d3ec\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP\n\nFile System Size:\t\t62912232\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t65536\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x0000000003bff6e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t291\n[10] s_agsize:\t\t0x00010000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x0077fedd\n \t\t s_fsckpxd.address:\t7864029\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'epplv\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5824\t[33] s_time.tj_nsec:\t0x00000000\n[14] 
s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000003d5\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t981\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5826\t[52] last unmounted:\t0x63d4d3ec\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:469] [[ 'no)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/epplv The current volume is: /dev/epplv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:446] lsfs /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:446] grep -w /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/mirrlogA is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
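
Every filesystem that clears both guards gets the identical check assembled at :441, /usr/sbin/fsck -f -p -o nologredo: -p repairs minor problems without prompting, and -o nologredo skips journal replay, which is safe here because, per the comments at :434-:436, logredo has already been run against each jfslog in get_disk_vg_fs. "Primary superblock is valid." is the healthy outcome seen for every device so far:

    # The per-device invocation the loop issues (device name illustrative):
    /usr/sbin/fsck -f -p -o nologredo /dev/examplelv
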
+epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] lsfs -qc /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] MOUNTGUARD='no)' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:463] fsdb /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/mirrlogA\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'mirrlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5834\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5836\t[52] last unmounted:\t0x63d4d3b3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/mirrlogA\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] 
s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'mirrlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5834\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5836\t[52] last unmounted:\t0x63d4d3b3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:469] [[ 'no)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/mirrlogAlv The current volume is: /dev/mirrlogAlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:446] lsfs /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:446] grep -w /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/mirrlogB is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:450] : else in the cluster. 
If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] lsfs -qc /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] MOUNTGUARD='no)' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:463] fsdb /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/mirrlogB\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'mirrlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5835\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5836\t[52] last unmounted:\t0x63d4d3b4\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused 
timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/mirrlogB\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'mirrlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5835\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5836\t[52] last unmounted:\t0x63d4d3b4\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:469] [[ 'no)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/mirrlogBlv The current volume is: /dev/mirrlogBlv Primary superblock is valid. 
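
At :503 each iteration tests RECOVERY_METHOD; with sequential the branch falls through and fsck runs in the foreground, so devices are checked strictly one at a time. The parallel path is never taken in this trace, so the following fork is a sketch only, with the backgrounding and reaping assumed rather than observed:

    if [[ $RECOVERY_METHOD == parallel ]]
    then
        $TOOL $DEV &    # assumed: launch checks concurrently
    else
        $TOOL $DEV      # this trace: block on each filesystem in turn
    fi
    wait                # assumed: reap any background checks
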
+epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:446] lsfs /oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:446] grep -w /oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/oraarch is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] lsfs -qc /oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] MOUNTGUARD='no)' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
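
Because :445 folds the mount point into PS4_LOOP, every trace line of an iteration is stamped with its filesystem (cl_activate_fs:/oracle/EPP/oraarch[...]), which makes it easy to pull a single filesystem's activation history out of the log. For example (the hacmp.out path is its usual location, not taken from this trace):

    grep 'cl_activate_fs:/oracle/EPP/oraarch\[' /var/hacmp/log/hacmp.out
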
+epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:463] fsdb /oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/oraarch\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'oraarc\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d582e\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5835\t[52] last unmounted:\t0x63d4d3b2\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/oraarch\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'oraarc\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 
0x00000000639d582e\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5835\t[52] last unmounted:\t0x63d4d3b2\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:469] [[ 'no)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/oraarchlv The current volume is: /dev/oraarchlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:446] lsfs /oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:446] grep -w /oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/origlogA is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
+epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] lsfs -qc /oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] MOUNTGUARD='no)' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:463] fsdb /oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/origlogA\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'origlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5832\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5836\t[52] last unmounted:\t0x63d4d3b3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/origlogA\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] 
s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'origlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5832\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5836\t[52] last unmounted:\t0x63d4d3b3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:469] [[ 'no)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/origlogAlv The current volume is: /dev/origlogAlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:446] lsfs /oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:446] grep -w /oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/origlogB is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:450] : else in the cluster. 
If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] lsfs -qc /oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] MOUNTGUARD='no)' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:463] fsdb /oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/origlogB\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'origlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5833\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5836\t[52] last unmounted:\t0x63d4d3b3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused 
timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/origlogB\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'origlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5833\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5836\t[52] last unmounted:\t0x63d4d3b3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:469] [[ 'no)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/origlogBlv The current volume is: /dev/origlogBlv Primary superblock is valid. 
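The block above is the template repeated for every file system in the resource group: a two-stage guard against double mounts before any fsck or mount is attempted. Stage one asks lsfs -qc whether the MountGuard attribute is set; stage two feeds the fsdb subcommands su (show superblock) and q (quit) through a here-document and greps the dump for the FM_MOUNT state flag. Note that MOUNTGUARD arrives as the literal token 'no)' because the cut on a space delimiter keeps the closing parenthesis from lsfs's parenthesized attribute list; the later == yes comparison is unaffected. A condensed sketch of the check, not the script itself:

FS=/oracle/EPP/origlogB                      # mount point under test
MOUNTGUARD=$(LC_ALL=C lsfs -qc "$FS" | tr : '\n' | grep -w MountGuard | cut '-d ' -f2)
FMMOUNT_OUT=$(fsdb "$FS" <<\EOF
su
q
EOF
)
FMMOUNT=$(echo "$FMMOUNT_OUT" | grep -w FM_MOUNT | awk '{ print $1 }')
if [[ $MOUNTGUARD == yes && -n $FMMOUNT ]]; then
    # guarded and marked mounted elsewhere: stop rather than risk corruption
    echo "ERROR: $FS appears to be mounted on another node"
fi

In this log every file system reports MountGuard off and an empty FMMOUNT, so processing falls through each time to the fsck at activate_fs_process_group:508.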
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:446] lsfs /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:446] grep -w /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/sapdata1 is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] lsfs -qc /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] MOUNTGUARD='no)' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:463] fsdb /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/sapdata1\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d582f\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5835\t[52] last unmounted:\t0x63d4d3b3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/sapdata1\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] 
s_time.tj_sec: 0x00000000639d582f\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5835\t[52] last unmounted:\t0x63d4d3b3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:469] [[ 'no)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapdata1lv The current volume is: /dev/sapdata1lv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:446] lsfs /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:446] grep -w /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/sapdata2 is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] lsfs -qc /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] MOUNTGUARD='no)' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:463] fsdb /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/sapdata2\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5830\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5835\t[52] last unmounted:\t0x63d4d3b3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/sapdata2\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] 
s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5830\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5835\t[52] last unmounted:\t0x63d4d3b3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:469] [[ 'no)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapdata2lv The current volume is: /dev/sapdata2lv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:446] lsfs /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:446] grep -w /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/sapdata3 is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:450] : else in the cluster. 
If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] lsfs -qc /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] MOUNTGUARD='no)' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:463] fsdb /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/sapdata3\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5831\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5835\t[52] last unmounted:\t0x63d4d3b3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused 
timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/sapdata3\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5831\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5835\t[52] last unmounted:\t0x63d4d3b3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:469] [[ 'no)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapdata3lv The current volume is: /dev/sapdata3lv Primary superblock is valid. 
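Each file system that passes the guard is then checked with the same non-interactive invocation, /usr/sbin/fsck -f -p -o nologredo <device>: -f forces the check even when the superblock claims a clean unmount, -p repairs minor inconsistencies without prompting, and -o nologredo skips replay of the JFS2 journal log, which is reasonable here because the superblock dump already showed FM_CLEAN. A minimal annotated form:

/usr/sbin/fsck -f -p -o nologredo /dev/sapdata3lv
#   -f             check even if the file system appears clean
#   -p             fix safe problems without operator interaction
#   -o nologredo   do not run log redo against the JFS2 journal first

The two-line result seen after each call, 'The current volume is: <device>' followed by 'Primary superblock is valid.', is the success path for this verification pass.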
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:446] lsfs /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:446] grep -w /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/sapdata4 is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] lsfs -qc /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] MOUNTGUARD='no)' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:463] fsdb /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/sapdata4\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5831\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5835\t[52] last unmounted:\t0x63d4d3b3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/sapdata4\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] 
s_time.tj_sec: 0x00000000639d5831\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d5835\t[52] last unmounted:\t0x63d4d3b3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:469] [[ 'no)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapdata4lv The current volume is: /dev/sapdata4lv Primary superblock is valid. +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:445] PS4_LOOP=/sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:446] lsfs /sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:446] grep -w /sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:449] : Verify if any of the file system /sapmnt is already mounted anywhere +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] lsfs -qc /sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] MOUNTGUARD='no)' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:463] fsdb /sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/sapmnt\n\nFile System Size:\t\t20970472\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t32768\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000013ffbe8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t131\n[10] s_agsize:\t\t0x00008000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x0027ff7d\n \t\t s_fsckpxd.address:\t2621309\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapmnt\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5818\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000155\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t341\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d581e\t[52] last unmounted:\t0x63d4d408\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/sapmnt\n\nFile System Size:\t\t20970472\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t32768\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000013ffbe8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t131\n[10] s_agsize:\t\t0x00008000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x0027ff7d\n \t\t s_fsckpxd.address:\t2621309\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapmnt\'\n FM_CLEAN 
\t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5818\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000155\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t341\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d581e\t[52] last unmounted:\t0x63d4d408\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:469] [[ 'no)' == yes ]] +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapmntlv The current volume is: /dev/sapmntlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:445] PS4_LOOP=/usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:446] lsfs /usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:446] grep -w /usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:449] : Verify if any of the file system /usr/sap is already mounted anywhere +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] lsfs -qc /usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] MOUNTGUARD='no)' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:463] fsdb /usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/usr/sap\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'saplv\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5815\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d581e\t[52] last unmounted:\t0x63d4d3c7\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/usr/sap\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000001\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x00000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] 
s_fpack:\t\t\'saplv\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5815\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x639d581e\t[52] last unmounted:\t0x63d4d3c7\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:469] [[ 'no)' == yes ]] +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/saplv The current volume is: /dev/saplv Primary superblock is valid. 
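/usr/sap is the last of the checks, so the script moves from verification to mounting. The recurring [[ sequential == parallel ]] test shows that the recovery method resolved to sequential for this resource group: every fsck ran in the foreground, one file system at a time. With parallel recovery the same commands would be backgrounded and collected by the wait that opens the next phase. A rough sketch of that dispatch, with RECOVERY_METHOD and FS_LIST as hypothetical stand-ins for values the real script derives from the resource group definition:

for fs in $FS_LIST; do
    dev=$(lsfs -c "$fs" | tail -1 | cut -d: -f2)      # mount point -> device
    if [[ $RECOVERY_METHOD == parallel ]]; then
        /usr/sbin/fsck -f -p -o nologredo "$dev" &    # fan the checks out
    else
        /usr/sbin/fsck -f -p -o nologredo "$dev"      # serial, as in this log
    fi
done
wait    # a no-op when sequential; reaps backgrounded fscks when parallel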
+epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:513] : Allow any backgrounded fsck operations to finish +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:515] wait +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:519] : Now attempt to mount all the file systems +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:521] ALLFS=All_filesystems +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:522] cl_RMupdate resource_acquiring All_filesystems cl_activate_fs 2023-01-28T17:10:40.713652 2023-01-28T17:10:40.718011 +epprd_rg:cl_activate_fs(0.783):/usr/sap[activate_fs_process_group:524] PS4_TIMER=true +epprd_rg:cl_activate_fs(0.783):/usr/sap[activate_fs_process_group:524] typeset PS4_TIMER +epprd_rg:cl_activate_fs(0.783):/board_org[activate_fs_process_group:527] PS4_LOOP=/board_org +epprd_rg:cl_activate_fs(0.783):/board_org[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(0.783):/board_org[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(0.783):/board_org[activate_fs_process_group:540] fs_mount /board_org fsck epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:69] FS=/board_org +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:81] : Here check to see if the information in /etc/filesystems for /board_org +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:84] : point in /etc/filesystems. 
+epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:86] lsfs -c /board_org +epprd_rg:cl_activate_fs(0.784):/board_org[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(0.789):/board_org[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(0.784):/board_org[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(0.789):/board_org[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(0.789):/board_org[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(0.790):/board_org[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(0.784):/board_org[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(0.791):/board_org[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(0.791):/board_org[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(0.791):/board_org[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(0.793):/board_org[fs_mount:100] LV_name=boardlv +epprd_rg:cl_activate_fs(0.793):/board_org[fs_mount:101] getlvcb -T -A boardlv +epprd_rg:cl_activate_fs(0.794):/board_org[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(0.811):/board_org[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(0.794):/board_org[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.11 \n\t lvname = boardlv \n\t label = /board_org \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:47 2022\n \t time modified = Sat Dec 17 14:48:34 2022\n ' +epprd_rg:cl_activate_fs(0.812):/board_org[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(0.812):/board_org[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(0.813):/board_org[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(0.794):/board_org[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.11 \n\t lvname = boardlv \n\t label = /board_org \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:47 2022\n \t time modified = Sat Dec 17 14:48:34 2022\n ' +epprd_rg:cl_activate_fs(0.814):/board_org[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(0.814):/board_org[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(0.815):/board_org[fs_mount:115] clodmget -q 'name = boardlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(0.819):/board_org[fs_mount:115] CuAt_label=/board_org +epprd_rg:cl_activate_fs(0.819):/board_org[fs_mount:118] : At this point, if things are working correctly, /board_org from /etc/filesystems +epprd_rg:cl_activate_fs(0.819):/board_org[fs_mount:119] : should match /board_org from CuAt ODM and /board_org from the LVCB +epprd_rg:cl_activate_fs(0.819):/board_org[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(0.819):/board_org[fs_mount:121] : were done in clvaryonvg. 
+epprd_rg:cl_activate_fs(0.819):/board_org[fs_mount:123] [[ /board_org != /board_org ]] +epprd_rg:cl_activate_fs(0.819):/board_org[fs_mount:128] [[ /board_org != /board_org ]] +epprd_rg:cl_activate_fs(0.819):/board_org[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(0.819):/board_org[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(0.819):/board_org[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(0.839):/board_org[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(0.839):/board_org[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(0.839):/board_org[fs_mount:160] amlog_trace '' 'Activating Filesystem|/board_org' +epprd_rg:cl_activate_fs(0.839):/board_org[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(0.840):/board_org[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(0.864):/board_org[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(0.866):/board_org[amlog_trace:319] DATE=2023-01-28T17:10:40.802224 +epprd_rg:cl_activate_fs(0.866):/board_org[amlog_trace:320] echo '|2023-01-28T17:10:40.802224|INFO: Activating Filesystem|/board_org' +epprd_rg:cl_activate_fs(0.866):/board_org[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(0.867):/board_org[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(0.869):/board_org[fs_mount:162] : Try to mount filesystem /board_org at Jan 28 17:10:40.000 +epprd_rg:cl_activate_fs(0.869):/board_org[fs_mount:163] mount /board_org +epprd_rg:cl_activate_fs(0.881):/board_org[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(0.881):/board_org[fs_mount:219] : On successful mount of a JFS2 file system, 
engage mountguard, +epprd_rg:cl_activate_fs(0.881):/board_org[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(0.881):/board_org[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/board_org' +epprd_rg:cl_activate_fs(0.881):/board_org[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(0.882):/board_org[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(0.906):/board_org[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(0.909):/board_org[amlog_trace:319] DATE=2023-01-28T17:10:40.844938 +epprd_rg:cl_activate_fs(0.909):/board_org[amlog_trace:320] echo '|2023-01-28T17:10:40.844938|INFO: Activating Filesystems completed|/board_org' +epprd_rg:cl_activate_fs(0.909):/board_org[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(0.909):/board_org[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(0.909):/board_org[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(0.909):/board_org[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(0.909):/board_org[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(0.909):/board_org[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(0.909):/board_org[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(0.909):/board_org[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(0.909):/board_org[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(0.909):/board_org[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(0.909):/board_org[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(0.910):/board_org[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(0.911):/board_org[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(0.913):/board_org[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(0.913):/board_org[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(0.913):/board_org[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(0.913):/board_org[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(0.913):/board_org[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(0.913):/board_org[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(0.913):/board_org[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(0.913):/board_org[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(0.794):/board_org[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.11 \n\t lvname = boardlv \n\t label = /board_org \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:47 2022\n \t time modified = Sat Dec 17 14:48:34 2022\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(0.913):/board_org[fs_mount:249] chfs -a mountguard=yes /board_org +epprd_rg:cl_activate_fs(0.914):/board_org[fs_mount:249] CLUSTER_OVERRIDE=yes /board_org is now guarded against concurrent mounts. 
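The chfs call above is issued only because the LVCB did not already contain mountguard=yes; since changing the attribute rewrites the VG timestamp, fs_mount sets it at most once per file system. A hedged sketch of that enable-once pattern; ensure_mountguard is an invented name, while getlvcb, chfs and the CLUSTER_OVERRIDE prefix behave as shown in the trace:

function ensure_mountguard
{
    typeset FS=$1 LV_name=$2 LVCB_info

    LVCB_info=$(getlvcb -T -A "$LV_name" 2>&1)

    # Enable the JFS2 double-mount guard only when it is not already on:
    # chfs updates the LVCB and bumps the VG timestamp, so run it once.
    if [[ $LVCB_info != *mountguard=yes* ]]
    then
        CLUSTER_OVERRIDE=yes chfs -a mountguard=yes "$FS"
    fi
}

ensure_mountguard /board_org boardlv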
+epprd_rg:cl_activate_fs(1.071):/board_org[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.071):/oracle[activate_fs_process_group:527] PS4_LOOP=/oracle +epprd_rg:cl_activate_fs(1.071):/oracle[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.071):/oracle[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.071):/oracle[activate_fs_process_group:540] fs_mount /oracle fsck epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:69] FS=/oracle +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:84] : point in /etc/filesystems. 
+epprd_rg:cl_activate_fs(1.071):/oracle[fs_mount:86] lsfs -c /oracle +epprd_rg:cl_activate_fs(1.072):/oracle[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.077):/oracle[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.072):/oracle[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle:/dev/oraclelv:jfs2:::41943040:rw:no:no' +epprd_rg:cl_activate_fs(1.077):/oracle[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.077):/oracle[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.078):/oracle[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.072):/oracle[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle:/dev/oraclelv:jfs2:::41943040:rw:no:no' +epprd_rg:cl_activate_fs(1.079):/oracle[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.080):/oracle[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.080):/oracle[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.081):/oracle[fs_mount:100] LV_name=oraclelv +epprd_rg:cl_activate_fs(1.081):/oracle[fs_mount:101] getlvcb -T -A oraclelv +epprd_rg:cl_activate_fs(1.082):/oracle[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.100):/oracle[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.082):/oracle[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.4 \n\t lvname = oraclelv \n\t label = /oracle \n\t machine id = 44AF14B00 \n\t number lps = 40 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:42 2022\n \t time modified = Sat Dec 17 14:48:09 2022\n ' +epprd_rg:cl_activate_fs(1.100):/oracle[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.100):/oracle[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.101):/oracle[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.082):/oracle[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.4 \n\t lvname = oraclelv \n\t label = /oracle \n\t machine id = 44AF14B00 \n\t number lps = 40 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:42 2022\n \t time modified = Sat Dec 17 14:48:09 2022\n ' +epprd_rg:cl_activate_fs(1.102):/oracle[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.103):/oracle[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.104):/oracle[fs_mount:115] clodmget -q 'name = oraclelv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.108):/oracle[fs_mount:115] CuAt_label=/oracle +epprd_rg:cl_activate_fs(1.108):/oracle[fs_mount:118] : At this point, if things are working correctly, /oracle from /etc/filesystems +epprd_rg:cl_activate_fs(1.108):/oracle[fs_mount:119] : should match /oracle from CuAt ODM and /oracle from the LVCB +epprd_rg:cl_activate_fs(1.108):/oracle[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.108):/oracle[fs_mount:121] : were done in clvaryonvg. 
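The activate_fs_process_group lines show the dispatch decision: with sequential recovery, fs_mount runs in the foreground for each file system in turn; the parallel branch is never taken in this trace. A sketch of that dispatch under the assumption that parallel mode simply backgrounds the same calls; RECOVERY_MODE and FS_LIST are illustrative variable names:

# Assumed driver loop; fs_mount is the function traced above.
TMP_FILENAME=epprd_rg_activate_fs.tmp$$   # per-run scratch file, named as in the trace

for fs in $FS_LIST
do
    if [[ $RECOVERY_MODE == parallel ]]
    then
        fs_mount "$fs" fsck "$TMP_FILENAME" &     # assumed: recover in background
    else
        fs_mount "$fs" fsck "$TMP_FILENAME"       # foreground, serial recovery
    fi
done
wait    # no-op in serial mode; reaps background mounts in parallel mode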
+epprd_rg:cl_activate_fs(1.108):/oracle[fs_mount:123] [[ /oracle != /oracle ]] +epprd_rg:cl_activate_fs(1.108):/oracle[fs_mount:128] [[ /oracle != /oracle ]] +epprd_rg:cl_activate_fs(1.108):/oracle[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.108):/oracle[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.108):/oracle[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.128):/oracle[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.128):/oracle[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.128):/oracle[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle' +epprd_rg:cl_activate_fs(1.128):/oracle[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.129):/oracle[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.152):/oracle[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.155):/oracle[amlog_trace:319] DATE=2023-01-28T17:10:41.090991 +epprd_rg:cl_activate_fs(1.155):/oracle[amlog_trace:320] echo '|2023-01-28T17:10:41.090991|INFO: Activating Filesystem|/oracle' +epprd_rg:cl_activate_fs(1.155):/oracle[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.155):/oracle[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.158):/oracle[fs_mount:162] : Try to mount filesystem /oracle at Jan 28 17:10:41.000 +epprd_rg:cl_activate_fs(1.158):/oracle[fs_mount:163] mount /oracle +epprd_rg:cl_activate_fs(1.169):/oracle[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.169):/oracle[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.169):/oracle[fs_mount:220] : if we are 
running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.169):/oracle[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle' +epprd_rg:cl_activate_fs(1.169):/oracle[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.170):/oracle[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.194):/oracle[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.197):/oracle[amlog_trace:319] DATE=2023-01-28T17:10:41.132698 +epprd_rg:cl_activate_fs(1.197):/oracle[amlog_trace:320] echo '|2023-01-28T17:10:41.132698|INFO: Activating Filesystems completed|/oracle' +epprd_rg:cl_activate_fs(1.197):/oracle[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.197):/oracle[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.197):/oracle[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.197):/oracle[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.197):/oracle[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.197):/oracle[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.197):/oracle[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.197):/oracle[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.197):/oracle[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.197):/oracle[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.197):/oracle[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.198):/oracle[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.199):/oracle[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.201):/oracle[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.201):/oracle[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(1.201):/oracle[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.201):/oracle[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.201):/oracle[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.201):/oracle[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.201):/oracle[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.201):/oracle[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.082):/oracle[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.4 \n\t lvname = oraclelv \n\t label = /oracle \n\t machine id = 44AF14B00 \n\t number lps = 40 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:42 2022\n \t time modified = Sat Dec 17 14:48:09 2022\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.201):/oracle[fs_mount:249] chfs -a mountguard=yes /oracle +epprd_rg:cl_activate_fs(1.202):/oracle[fs_mount:249] CLUSTER_OVERRIDE=yes /oracle is now guarded against concurrent mounts. 
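The VRMF arithmetic above relies on fixed-width padding: typeset -Z2/-Z3 zero-fill R, M and F, so concatenating the fields turns fileset level 7.2.5.102 into the comparable integer 702005102. The same computation as a standalone ksh fragment, reading the bos.rte.filesystem level exactly as the trace does:

typeset -li V VRMF=0
typeset -Z2 R       # release, zero-padded to 2 digits
typeset -Z3 M F     # modification and fix, zero-padded to 3 digits

# lslpp -lcqOr prints colon-separated output; field 3 is the level, e.g. 7.2.5.102
lslpp -lcqOr bos.rte.filesystem | cut -f3 -d: | IFS=. read V R M F
VRMF=$V$R$M$F       # 7.2.5.102 -> 702005102

# Mountguard needs AIX 6.1 TL7 or 7.1 TL1 and later, per the tests in the trace
if (( V == 6 && VRMF >= 601007000 )) || (( V == 7 && VRMF >= 701001000 ))
then
    print 'mountguard supported at this level'
fi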
+epprd_rg:cl_activate_fs(1.355):/oracle[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[activate_fs_process_group:540] fs_mount /oracle/EPP fsck epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:69] FS=/oracle/EPP +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:84] : point in /etc/filesystems. 
+epprd_rg:cl_activate_fs(1.355):/oracle/EPP[fs_mount:86] lsfs -c /oracle/EPP +epprd_rg:cl_activate_fs(1.356):/oracle/EPP[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.361):/oracle/EPP[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.356):/oracle/EPP[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP:/dev/epplv:jfs2:::62914560:rw:no:no' +epprd_rg:cl_activate_fs(1.361):/oracle/EPP[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.361):/oracle/EPP[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.362):/oracle/EPP[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.356):/oracle/EPP[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP:/dev/epplv:jfs2:::62914560:rw:no:no' +epprd_rg:cl_activate_fs(1.363):/oracle/EPP[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.364):/oracle/EPP[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.364):/oracle/EPP[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.365):/oracle/EPP[fs_mount:100] LV_name=epplv +epprd_rg:cl_activate_fs(1.365):/oracle/EPP[fs_mount:101] getlvcb -T -A epplv +epprd_rg:cl_activate_fs(1.366):/oracle/EPP[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.384):/oracle/EPP[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.366):/oracle/EPP[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.5 \n\t lvname = epplv \n\t label = /oracle/EPP \n\t machine id = 44AF14B00 \n\t number lps = 60 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Dec 17 14:48:21 2022\n ' +epprd_rg:cl_activate_fs(1.384):/oracle/EPP[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.384):/oracle/EPP[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.385):/oracle/EPP[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.366):/oracle/EPP[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.5 \n\t lvname = epplv \n\t label = /oracle/EPP \n\t machine id = 44AF14B00 \n\t number lps = 60 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Dec 17 14:48:21 2022\n ' +epprd_rg:cl_activate_fs(1.386):/oracle/EPP[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.386):/oracle/EPP[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.388):/oracle/EPP[fs_mount:115] clodmget -q 'name = epplv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.391):/oracle/EPP[fs_mount:115] CuAt_label=/oracle/EPP +epprd_rg:cl_activate_fs(1.391):/oracle/EPP[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP from /etc/filesystems +epprd_rg:cl_activate_fs(1.391):/oracle/EPP[fs_mount:119] : should match /oracle/EPP from CuAt ODM and /oracle/EPP from the LVCB +epprd_rg:cl_activate_fs(1.391):/oracle/EPP[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.391):/oracle/EPP[fs_mount:121] : were done in clvaryonvg. 
+epprd_rg:cl_activate_fs(1.391):/oracle/EPP[fs_mount:123] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:cl_activate_fs(1.391):/oracle/EPP[fs_mount:128] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:cl_activate_fs(1.391):/oracle/EPP[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.391):/oracle/EPP[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.391):/oracle/EPP[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.411):/oracle/EPP[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.411):/oracle/EPP[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.411):/oracle/EPP[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP' +epprd_rg:cl_activate_fs(1.411):/oracle/EPP[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.412):/oracle/EPP[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.436):/oracle/EPP[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.439):/oracle/EPP[amlog_trace:319] DATE=2023-01-28T17:10:41.374882 +epprd_rg:cl_activate_fs(1.439):/oracle/EPP[amlog_trace:320] echo '|2023-01-28T17:10:41.374882|INFO: Activating Filesystem|/oracle/EPP' +epprd_rg:cl_activate_fs(1.439):/oracle/EPP[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.439):/oracle/EPP[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.442):/oracle/EPP[fs_mount:162] : Try to mount filesystem /oracle/EPP at Jan 28 17:10:41.000 +epprd_rg:cl_activate_fs(1.442):/oracle/EPP[fs_mount:163] mount /oracle/EPP +epprd_rg:cl_activate_fs(1.468):/oracle/EPP[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.468):/oracle/EPP[fs_mount:219] : On successful mount 
of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.468):/oracle/EPP[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.468):/oracle/EPP[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP' +epprd_rg:cl_activate_fs(1.468):/oracle/EPP[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.469):/oracle/EPP[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.493):/oracle/EPP[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.496):/oracle/EPP[amlog_trace:319] DATE=2023-01-28T17:10:41.431447 +epprd_rg:cl_activate_fs(1.496):/oracle/EPP[amlog_trace:320] echo '|2023-01-28T17:10:41.431447|INFO: Activating Filesystems completed|/oracle/EPP' +epprd_rg:cl_activate_fs(1.496):/oracle/EPP[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.496):/oracle/EPP[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.496):/oracle/EPP[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.496):/oracle/EPP[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.496):/oracle/EPP[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.496):/oracle/EPP[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.496):/oracle/EPP[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.496):/oracle/EPP[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.496):/oracle/EPP[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.496):/oracle/EPP[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.496):/oracle/EPP[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.497):/oracle/EPP[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.498):/oracle/EPP[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.499):/oracle/EPP[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.499):/oracle/EPP[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(1.500):/oracle/EPP[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.500):/oracle/EPP[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.500):/oracle/EPP[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.500):/oracle/EPP[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.500):/oracle/EPP[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.500):/oracle/EPP[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.366):/oracle/EPP[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.5 \n\t lvname = epplv \n\t label = /oracle/EPP \n\t machine id = 44AF14B00 \n\t number lps = 60 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Dec 17 14:48:21 2022\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.500):/oracle/EPP[fs_mount:249] chfs -a mountguard=yes /oracle/EPP +epprd_rg:cl_activate_fs(1.501):/oracle/EPP[fs_mount:249] CLUSTER_OVERRIDE=yes /oracle/EPP is now guarded against concurrent mounts. 
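Every fs_mount pass re-executes clwparroot, which verifies that bos.wpars is installed and then asks the HACMPresource ODM class for a WPAR_NAME resource; with none defined it exits 0 with empty output, so WPAR_ROOT stays empty and the mount proceeds in the global environment. A condensed sketch of that lookup; get_wpar_root is an illustrative name, and the final step is deliberately omitted because this trace never reaches it:

function get_wpar_root
{
    # rgName is accepted for parity with clwparroot's interface; the ODM
    # query in the trace does not filter on it.
    typeset rgName=$1 wparName

    # No WPAR filesets installed -> nothing to resolve
    lslpp -l bos.wpars >/dev/null 2>&1 || return 0

    # A WPAR-enabled resource group carries a WPAR_NAME resource
    wparName=$(clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource)
    [[ -z $wparName ]] && return 0      # not WPAR-enabled; print nothing

    # For a defined WPAR, its base directory would be printed here
    # (e.g. queried with lswpar); omitted, as the trace never takes this path.
}

WPAR_ROOT=$(get_wpar_root epprd_rg)
[[ -n $WPAR_ROOT ]] && print "mounting under $WPAR_ROOT"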
+epprd_rg:cl_activate_fs(1.654):/oracle/EPP[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[activate_fs_process_group:540] fs_mount /oracle/EPP/mirrlogA fsck epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:69] FS=/oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:84] : point in /etc/filesystems. 
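The amlog_trace calls bracketing each mount append pipe-delimited records to /var/hacmp/availability/clavailability.log, first rotating the log with clcycle and timestamping with cltime. A sketch of that record format; am_trace is an invented name, while clcycle and cltime are the PowerHA utilities visible in the trace:

function am_trace
{
    typeset message=$1 DATE

    # Rotate the availability log if it has grown; failures are ignored
    clcycle clavailability.log >/dev/null 2>&1

    # cltime emits an ISO-8601 timestamp with microseconds,
    # e.g. 2023-01-28T17:10:41.673156
    DATE=$(cltime)

    print -- "|$DATE|INFO: $message" 1>> /var/hacmp/availability/clavailability.log
}

am_trace 'Activating Filesystem|/oracle/EPP/mirrlogA'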
+epprd_rg:cl_activate_fs(1.654):/oracle/EPP/mirrlogA[fs_mount:86] lsfs -c /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.655):/oracle/EPP/mirrlogA[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.660):/oracle/EPP/mirrlogA[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.656):/oracle/EPP/mirrlogA[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogA:/dev/mirrlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.660):/oracle/EPP/mirrlogA[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.660):/oracle/EPP/mirrlogA[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.661):/oracle/EPP/mirrlogA[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.656):/oracle/EPP/mirrlogA[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogA:/dev/mirrlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.662):/oracle/EPP/mirrlogA[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.663):/oracle/EPP/mirrlogA[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.663):/oracle/EPP/mirrlogA[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.664):/oracle/EPP/mirrlogA[fs_mount:100] LV_name=mirrlogAlv +epprd_rg:cl_activate_fs(1.664):/oracle/EPP/mirrlogA[fs_mount:101] getlvcb -T -A mirrlogAlv +epprd_rg:cl_activate_fs(1.665):/oracle/EPP/mirrlogA[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.682):/oracle/EPP/mirrlogA[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.665):/oracle/EPP/mirrlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.14 \n\t lvname = mirrlogAlv \n\t label = /oracle/EPP/mirrlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Dec 17 14:48:36 2022\n ' +epprd_rg:cl_activate_fs(1.682):/oracle/EPP/mirrlogA[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.682):/oracle/EPP/mirrlogA[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.683):/oracle/EPP/mirrlogA[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.665):/oracle/EPP/mirrlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.14 \n\t lvname = mirrlogAlv \n\t label = /oracle/EPP/mirrlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Dec 17 14:48:36 2022\n ' +epprd_rg:cl_activate_fs(1.684):/oracle/EPP/mirrlogA[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.685):/oracle/EPP/mirrlogA[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.686):/oracle/EPP/mirrlogA[fs_mount:115] clodmget -q 'name = mirrlogAlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.690):/oracle/EPP/mirrlogA[fs_mount:115] CuAt_label=/oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.690):/oracle/EPP/mirrlogA[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/mirrlogA from /etc/filesystems +epprd_rg:cl_activate_fs(1.690):/oracle/EPP/mirrlogA[fs_mount:119] : should match /oracle/EPP/mirrlogA from CuAt ODM and 
/oracle/EPP/mirrlogA from the LVCB +epprd_rg:cl_activate_fs(1.690):/oracle/EPP/mirrlogA[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.690):/oracle/EPP/mirrlogA[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.690):/oracle/EPP/mirrlogA[fs_mount:123] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:cl_activate_fs(1.690):/oracle/EPP/mirrlogA[fs_mount:128] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:cl_activate_fs(1.690):/oracle/EPP/mirrlogA[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.690):/oracle/EPP/mirrlogA[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.690):/oracle/EPP/mirrlogA[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.710):/oracle/EPP/mirrlogA[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.710):/oracle/EPP/mirrlogA[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.710):/oracle/EPP/mirrlogA[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_activate_fs(1.710):/oracle/EPP/mirrlogA[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.711):/oracle/EPP/mirrlogA[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.735):/oracle/EPP/mirrlogA[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.737):/oracle/EPP/mirrlogA[amlog_trace:319] DATE=2023-01-28T17:10:41.673156 +epprd_rg:cl_activate_fs(1.737):/oracle/EPP/mirrlogA[amlog_trace:320] echo '|2023-01-28T17:10:41.673156|INFO: Activating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_activate_fs(1.737):/oracle/EPP/mirrlogA[amlog_trace:320] 1>> 
/var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.737):/oracle/EPP/mirrlogA[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.740):/oracle/EPP/mirrlogA[fs_mount:162] : Try to mount filesystem /oracle/EPP/mirrlogA at Jan 28 17:10:41.000 +epprd_rg:cl_activate_fs(1.740):/oracle/EPP/mirrlogA[fs_mount:163] mount /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.752):/oracle/EPP/mirrlogA[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.752):/oracle/EPP/mirrlogA[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.752):/oracle/EPP/mirrlogA[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.752):/oracle/EPP/mirrlogA[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/mirrlogA' +epprd_rg:cl_activate_fs(1.752):/oracle/EPP/mirrlogA[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.753):/oracle/EPP/mirrlogA[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.777):/oracle/EPP/mirrlogA[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.779):/oracle/EPP/mirrlogA[amlog_trace:319] DATE=2023-01-28T17:10:41.715361 +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/mirrlogA[amlog_trace:320] echo '|2023-01-28T17:10:41.715361|INFO: Activating Filesystems completed|/oracle/EPP/mirrlogA' +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/mirrlogA[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/mirrlogA[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/mirrlogA[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/mirrlogA[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/mirrlogA[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/mirrlogA[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/mirrlogA[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/mirrlogA[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/mirrlogA[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/mirrlogA[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/mirrlogA[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.781):/oracle/EPP/mirrlogA[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.781):/oracle/EPP/mirrlogA[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.783):/oracle/EPP/mirrlogA[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.783):/oracle/EPP/mirrlogA[fs_mount:236] IFS=. 
+epprd_rg:cl_activate_fs(1.784):/oracle/EPP/mirrlogA[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.784):/oracle/EPP/mirrlogA[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.784):/oracle/EPP/mirrlogA[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.784):/oracle/EPP/mirrlogA[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.784):/oracle/EPP/mirrlogA[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.784):/oracle/EPP/mirrlogA[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.665):/oracle/EPP/mirrlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.14 \n\t lvname = mirrlogAlv \n\t label = /oracle/EPP/mirrlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Dec 17 14:48:36 2022\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.784):/oracle/EPP/mirrlogA[fs_mount:249] chfs -a mountguard=yes /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.785):/oracle/EPP/mirrlogA[fs_mount:249] CLUSTER_OVERRIDE=yes /oracle/EPP/mirrlogA is now guarded against concurrent mounts. +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogA[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[activate_fs_process_group:540] fs_mount /oracle/EPP/mirrlogB fsck epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:69] FS=/oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:78] typeset -i RC 
+epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/mirrlogB[fs_mount:86] lsfs -c /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.940):/oracle/EPP/mirrlogB[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.945):/oracle/EPP/mirrlogB[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.940):/oracle/EPP/mirrlogB[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogB:/dev/mirrlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.945):/oracle/EPP/mirrlogB[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.945):/oracle/EPP/mirrlogB[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.946):/oracle/EPP/mirrlogB[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.940):/oracle/EPP/mirrlogB[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogB:/dev/mirrlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.947):/oracle/EPP/mirrlogB[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.948):/oracle/EPP/mirrlogB[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.948):/oracle/EPP/mirrlogB[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.949):/oracle/EPP/mirrlogB[fs_mount:100] LV_name=mirrlogBlv +epprd_rg:cl_activate_fs(1.949):/oracle/EPP/mirrlogB[fs_mount:101] getlvcb -T -A mirrlogBlv +epprd_rg:cl_activate_fs(1.950):/oracle/EPP/mirrlogB[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.967):/oracle/EPP/mirrlogB[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.950):/oracle/EPP/mirrlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.15 \n\t lvname = mirrlogBlv \n\t label = /oracle/EPP/mirrlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:50 2022\n \t time modified = Sat Dec 17 14:48:37 2022\n ' +epprd_rg:cl_activate_fs(1.968):/oracle/EPP/mirrlogB[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.968):/oracle/EPP/mirrlogB[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.968):/oracle/EPP/mirrlogB[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.950):/oracle/EPP/mirrlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.15 \n\t lvname = mirrlogBlv \n\t label = /oracle/EPP/mirrlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:50 2022\n \t time modified = Sat Dec 17 14:48:37 2022\n ' +epprd_rg:cl_activate_fs(1.970):/oracle/EPP/mirrlogB[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.970):/oracle/EPP/mirrlogB[fs_mount:114] read skip skip LVCB_label 
+epprd_rg:cl_activate_fs(1.972):/oracle/EPP/mirrlogB[fs_mount:115] clodmget -q 'name = mirrlogBlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.975):/oracle/EPP/mirrlogB[fs_mount:115] CuAt_label=/oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.975):/oracle/EPP/mirrlogB[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/mirrlogB from /etc/filesystems +epprd_rg:cl_activate_fs(1.975):/oracle/EPP/mirrlogB[fs_mount:119] : should match /oracle/EPP/mirrlogB from CuAt ODM and /oracle/EPP/mirrlogB from the LVCB +epprd_rg:cl_activate_fs(1.975):/oracle/EPP/mirrlogB[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.975):/oracle/EPP/mirrlogB[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.975):/oracle/EPP/mirrlogB[fs_mount:123] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:cl_activate_fs(1.975):/oracle/EPP/mirrlogB[fs_mount:128] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:cl_activate_fs(1.975):/oracle/EPP/mirrlogB[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.975):/oracle/EPP/mirrlogB[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.975):/oracle/EPP/mirrlogB[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.995):/oracle/EPP/mirrlogB[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.995):/oracle/EPP/mirrlogB[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.995):/oracle/EPP/mirrlogB[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_activate_fs(1.995):/oracle/EPP/mirrlogB[amlog_trace:318] clcycle 
clavailability.log +epprd_rg:cl_activate_fs(1.996):/oracle/EPP/mirrlogB[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.020):/oracle/EPP/mirrlogB[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.023):/oracle/EPP/mirrlogB[amlog_trace:319] DATE=2023-01-28T17:10:41.958692 +epprd_rg:cl_activate_fs(2.023):/oracle/EPP/mirrlogB[amlog_trace:320] echo '|2023-01-28T17:10:41.958692|INFO: Activating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_activate_fs(2.023):/oracle/EPP/mirrlogB[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.023):/oracle/EPP/mirrlogB[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.026):/oracle/EPP/mirrlogB[fs_mount:162] : Try to mount filesystem /oracle/EPP/mirrlogB at Jan 28 17:10:41.000 +epprd_rg:cl_activate_fs(2.026):/oracle/EPP/mirrlogB[fs_mount:163] mount /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(2.037):/oracle/EPP/mirrlogB[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.037):/oracle/EPP/mirrlogB[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.037):/oracle/EPP/mirrlogB[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(2.037):/oracle/EPP/mirrlogB[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/mirrlogB' +epprd_rg:cl_activate_fs(2.037):/oracle/EPP/mirrlogB[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.038):/oracle/EPP/mirrlogB[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.062):/oracle/EPP/mirrlogB[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.065):/oracle/EPP/mirrlogB[amlog_trace:319] DATE=2023-01-28T17:10:42.000582 +epprd_rg:cl_activate_fs(2.065):/oracle/EPP/mirrlogB[amlog_trace:320] echo '|2023-01-28T17:10:42.000582|INFO: Activating Filesystems completed|/oracle/EPP/mirrlogB' +epprd_rg:cl_activate_fs(2.065):/oracle/EPP/mirrlogB[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.065):/oracle/EPP/mirrlogB[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.065):/oracle/EPP/mirrlogB[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(2.065):/oracle/EPP/mirrlogB[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.065):/oracle/EPP/mirrlogB[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.065):/oracle/EPP/mirrlogB[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.065):/oracle/EPP/mirrlogB[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.065):/oracle/EPP/mirrlogB[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.065):/oracle/EPP/mirrlogB[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.065):/oracle/EPP/mirrlogB[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.065):/oracle/EPP/mirrlogB[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.066):/oracle/EPP/mirrlogB[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.067):/oracle/EPP/mirrlogB[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/mirrlogB[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/mirrlogB[fs_mount:236] IFS=. 
+epprd_rg:cl_activate_fs(2.069):/oracle/EPP/mirrlogB[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/mirrlogB[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/mirrlogB[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/mirrlogB[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/mirrlogB[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/mirrlogB[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.950):/oracle/EPP/mirrlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.15 \n\t lvname = mirrlogBlv \n\t label = /oracle/EPP/mirrlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:50 2022\n \t time modified = Sat Dec 17 14:48:37 2022\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/mirrlogB[fs_mount:249] chfs -a mountguard=yes /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(2.070):/oracle/EPP/mirrlogB[fs_mount:249] CLUSTER_OVERRIDE=yes /oracle/EPP/mirrlogB is now guarded against concurrent mounts. +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/mirrlogB[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/oraarch +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[activate_fs_process_group:540] fs_mount /oracle/EPP/oraarch fsck epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:69] FS=/oracle/EPP/oraarch +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:81] : Here check 
to see if the information in /etc/filesystems for /oracle/EPP/oraarch +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/oraarch[fs_mount:86] lsfs -c /oracle/EPP/oraarch +epprd_rg:cl_activate_fs(2.228):/oracle/EPP/oraarch[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(2.233):/oracle/EPP/oraarch[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(2.228):/oracle/EPP/oraarch[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/oraarch:/dev/oraarchlv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.233):/oracle/EPP/oraarch[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(2.233):/oracle/EPP/oraarch[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.234):/oracle/EPP/oraarch[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(2.228):/oracle/EPP/oraarch[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/oraarch:/dev/oraarchlv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.235):/oracle/EPP/oraarch[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(2.236):/oracle/EPP/oraarch[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(2.236):/oracle/EPP/oraarch[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(2.237):/oracle/EPP/oraarch[fs_mount:100] LV_name=oraarchlv +epprd_rg:cl_activate_fs(2.237):/oracle/EPP/oraarch[fs_mount:101] getlvcb -T -A oraarchlv +epprd_rg:cl_activate_fs(2.238):/oracle/EPP/oraarch[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/oraarch[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(2.238):/oracle/EPP/oraarch[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.6 \n\t lvname = oraarchlv \n\t label = /oracle/EPP/oraarch \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Dec 17 14:48:30 2022\n ' +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/oraarch[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/oraarch[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.256):/oracle/EPP/oraarch[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(2.238):/oracle/EPP/oraarch[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.6 \n\t lvname = oraarchlv \n\t label = /oracle/EPP/oraarch \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Dec 17 14:48:30 2022\n ' +epprd_rg:cl_activate_fs(2.257):/oracle/EPP/oraarch[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.258):/oracle/EPP/oraarch[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(2.259):/oracle/EPP/oraarch[fs_mount:115] clodmget -q 'name = oraarchlv and attribute = label' -f value -n 
CuAt +epprd_rg:cl_activate_fs(2.263):/oracle/EPP/oraarch[fs_mount:115] CuAt_label=/oracle/EPP/oraarch +epprd_rg:cl_activate_fs(2.263):/oracle/EPP/oraarch[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/oraarch from /etc/filesystems +epprd_rg:cl_activate_fs(2.263):/oracle/EPP/oraarch[fs_mount:119] : should match /oracle/EPP/oraarch from CuAt ODM and /oracle/EPP/oraarch from the LVCB +epprd_rg:cl_activate_fs(2.263):/oracle/EPP/oraarch[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.263):/oracle/EPP/oraarch[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(2.263):/oracle/EPP/oraarch[fs_mount:123] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:cl_activate_fs(2.263):/oracle/EPP/oraarch[fs_mount:128] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:cl_activate_fs(2.263):/oracle/EPP/oraarch[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.263):/oracle/EPP/oraarch[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.263):/oracle/EPP/oraarch[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.283):/oracle/EPP/oraarch[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.283):/oracle/EPP/oraarch[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.283):/oracle/EPP/oraarch[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_activate_fs(2.283):/oracle/EPP/oraarch[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.284):/oracle/EPP/oraarch[amlog_trace:318] 1> /dev/null 2>& 1 
+epprd_rg:cl_activate_fs(2.308):/oracle/EPP/oraarch[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.311):/oracle/EPP/oraarch[amlog_trace:319] DATE=2023-01-28T17:10:42.246842 +epprd_rg:cl_activate_fs(2.311):/oracle/EPP/oraarch[amlog_trace:320] echo '|2023-01-28T17:10:42.246842|INFO: Activating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_activate_fs(2.311):/oracle/EPP/oraarch[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.311):/oracle/EPP/oraarch[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.314):/oracle/EPP/oraarch[fs_mount:162] : Try to mount filesystem /oracle/EPP/oraarch at Jan 28 17:10:42.000 +epprd_rg:cl_activate_fs(2.314):/oracle/EPP/oraarch[fs_mount:163] mount /oracle/EPP/oraarch +epprd_rg:cl_activate_fs(2.325):/oracle/EPP/oraarch[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.325):/oracle/EPP/oraarch[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.325):/oracle/EPP/oraarch[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(2.325):/oracle/EPP/oraarch[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/oraarch' +epprd_rg:cl_activate_fs(2.325):/oracle/EPP/oraarch[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.326):/oracle/EPP/oraarch[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.351):/oracle/EPP/oraarch[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.353):/oracle/EPP/oraarch[amlog_trace:319] DATE=2023-01-28T17:10:42.289280 +epprd_rg:cl_activate_fs(2.353):/oracle/EPP/oraarch[amlog_trace:320] echo '|2023-01-28T17:10:42.289280|INFO: Activating Filesystems completed|/oracle/EPP/oraarch' +epprd_rg:cl_activate_fs(2.353):/oracle/EPP/oraarch[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.354):/oracle/EPP/oraarch[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.354):/oracle/EPP/oraarch[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(2.354):/oracle/EPP/oraarch[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.354):/oracle/EPP/oraarch[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.354):/oracle/EPP/oraarch[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.354):/oracle/EPP/oraarch[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.354):/oracle/EPP/oraarch[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.354):/oracle/EPP/oraarch[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.354):/oracle/EPP/oraarch[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.354):/oracle/EPP/oraarch[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.355):/oracle/EPP/oraarch[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.356):/oracle/EPP/oraarch[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.357):/oracle/EPP/oraarch[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.357):/oracle/EPP/oraarch[fs_mount:236] IFS=. 
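The read just traced feeds the padded VRMF comparison that follows. The typeset -Z attributes zero-fill R to 2 digits and M and F to 3, so a level string such as 7.2.5.102 collates as the single integer 702005102 and plain arithmetic comparison is reliable up to the stated maximum of 99.99.999.999. A minimal ksh sketch of the idiom, condensed from the trace (not the PowerHA source itself):

    # Zero-pad R/M/F so "7.2.5.102" becomes 702005102.
    typeset -li V R M F VRMF=0
    typeset -Z2 R
    typeset -Z3 M F
    lslpp -lcqOr bos.rte.filesystem | cut -f3 -d: | IFS=. read V R M F
    VRMF=$V$R$M$F        # 7 + 02 + 005 + 102 -> 702005102
    (( V == 7 && VRMF >= 701001000 )) && : # mountguard usable at this level

In ksh93 the last element of a pipeline runs in the current shell, which is why the piped read can set V, R, M and F, exactly as in the traced script.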
+epprd_rg:cl_activate_fs(2.357):/oracle/EPP/oraarch[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.357):/oracle/EPP/oraarch[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.358):/oracle/EPP/oraarch[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.358):/oracle/EPP/oraarch[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.358):/oracle/EPP/oraarch[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.358):/oracle/EPP/oraarch[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(2.238):/oracle/EPP/oraarch[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.6 \n\t lvname = oraarchlv \n\t label = /oracle/EPP/oraarch \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Dec 17 14:48:30 2022\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.358):/oracle/EPP/oraarch[fs_mount:249] chfs -a mountguard=yes /oracle/EPP/oraarch +epprd_rg:cl_activate_fs(2.359):/oracle/EPP/oraarch[fs_mount:249] CLUSTER_OVERRIDE=yes /oracle/EPP/oraarch is now guarded against concurrent mounts. +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/oraarch[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/origlogA +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[activate_fs_process_group:540] fs_mount /oracle/EPP/origlogA fsck epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:69] FS=/oracle/EPP/origlogA +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:81] : 
Here check to see if the information in /etc/filesystems for /oracle/EPP/origlogA +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(2.506):/oracle/EPP/origlogA[fs_mount:86] lsfs -c /oracle/EPP/origlogA +epprd_rg:cl_activate_fs(2.507):/oracle/EPP/origlogA[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(2.511):/oracle/EPP/origlogA[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(2.507):/oracle/EPP/origlogA[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogA:/dev/origlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(2.511):/oracle/EPP/origlogA[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(2.512):/oracle/EPP/origlogA[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.512):/oracle/EPP/origlogA[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(2.507):/oracle/EPP/origlogA[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogA:/dev/origlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(2.514):/oracle/EPP/origlogA[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(2.516):/oracle/EPP/origlogA[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(2.516):/oracle/EPP/origlogA[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(2.516):/oracle/EPP/origlogA[fs_mount:100] LV_name=origlogAlv +epprd_rg:cl_activate_fs(2.516):/oracle/EPP/origlogA[fs_mount:101] getlvcb -T -A origlogAlv +epprd_rg:cl_activate_fs(2.517):/oracle/EPP/origlogA[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.533):/oracle/EPP/origlogA[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(2.517):/oracle/EPP/origlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.12 \n\t lvname = origlogAlv \n\t label = /oracle/EPP/origlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:48 2022\n \t time modified = Sat Dec 17 14:48:35 2022\n ' +epprd_rg:cl_activate_fs(2.533):/oracle/EPP/origlogA[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.533):/oracle/EPP/origlogA[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.534):/oracle/EPP/origlogA[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(2.517):/oracle/EPP/origlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.12 \n\t lvname = origlogAlv \n\t label = /oracle/EPP/origlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:48 2022\n \t time modified = Sat Dec 17 14:48:35 2022\n ' +epprd_rg:cl_activate_fs(2.536):/oracle/EPP/origlogA[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.538):/oracle/EPP/origlogA[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(2.538):/oracle/EPP/origlogA[fs_mount:115] clodmget -q 'name 
= origlogAlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(2.541):/oracle/EPP/origlogA[fs_mount:115] CuAt_label=/oracle/EPP/origlogA +epprd_rg:cl_activate_fs(2.541):/oracle/EPP/origlogA[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/origlogA from /etc/filesystems +epprd_rg:cl_activate_fs(2.541):/oracle/EPP/origlogA[fs_mount:119] : should match /oracle/EPP/origlogA from CuAt ODM and /oracle/EPP/origlogA from the LVCB +epprd_rg:cl_activate_fs(2.541):/oracle/EPP/origlogA[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.541):/oracle/EPP/origlogA[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(2.541):/oracle/EPP/origlogA[fs_mount:123] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:cl_activate_fs(2.541):/oracle/EPP/origlogA[fs_mount:128] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:cl_activate_fs(2.541):/oracle/EPP/origlogA[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.541):/oracle/EPP/origlogA[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.541):/oracle/EPP/origlogA[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.561):/oracle/EPP/origlogA[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.561):/oracle/EPP/origlogA[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.561):/oracle/EPP/origlogA[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_activate_fs(2.561):/oracle/EPP/origlogA[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.562):/oracle/EPP/origlogA[amlog_trace:318] 1> /dev/null 
2>& 1 +epprd_rg:cl_activate_fs(2.590):/oracle/EPP/origlogA[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.593):/oracle/EPP/origlogA[amlog_trace:319] DATE=2023-01-28T17:10:42.528562 +epprd_rg:cl_activate_fs(2.593):/oracle/EPP/origlogA[amlog_trace:320] echo '|2023-01-28T17:10:42.528562|INFO: Activating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_activate_fs(2.593):/oracle/EPP/origlogA[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.593):/oracle/EPP/origlogA[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.595):/oracle/EPP/origlogA[fs_mount:162] : Try to mount filesystem /oracle/EPP/origlogA at Jan 28 17:10:42.000 +epprd_rg:cl_activate_fs(2.596):/oracle/EPP/origlogA[fs_mount:163] mount /oracle/EPP/origlogA +epprd_rg:cl_activate_fs(2.606):/oracle/EPP/origlogA[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.606):/oracle/EPP/origlogA[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.606):/oracle/EPP/origlogA[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(2.606):/oracle/EPP/origlogA[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/origlogA' +epprd_rg:cl_activate_fs(2.606):/oracle/EPP/origlogA[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.607):/oracle/EPP/origlogA[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.633):/oracle/EPP/origlogA[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.635):/oracle/EPP/origlogA[amlog_trace:319] DATE=2023-01-28T17:10:42.571054 +epprd_rg:cl_activate_fs(2.635):/oracle/EPP/origlogA[amlog_trace:320] echo '|2023-01-28T17:10:42.571054|INFO: Activating Filesystems completed|/oracle/EPP/origlogA' +epprd_rg:cl_activate_fs(2.635):/oracle/EPP/origlogA[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.635):/oracle/EPP/origlogA[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.635):/oracle/EPP/origlogA[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(2.635):/oracle/EPP/origlogA[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.635):/oracle/EPP/origlogA[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.635):/oracle/EPP/origlogA[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.635):/oracle/EPP/origlogA[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.635):/oracle/EPP/origlogA[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.636):/oracle/EPP/origlogA[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.636):/oracle/EPP/origlogA[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.636):/oracle/EPP/origlogA[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.636):/oracle/EPP/origlogA[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.640):/oracle/EPP/origlogA[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.641):/oracle/EPP/origlogA[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.641):/oracle/EPP/origlogA[fs_mount:236] IFS=. 
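Every amlog_trace call above follows the same three steps: rotate the availability log if needed, take a timestamp, and append a pipe-delimited record. A hedged sketch of that pattern (the function body and the role of the first, empty argument are assumed; clcycle and cltime are the PowerHA utilities seen in the trace):

    amlog_trace()    # $1: unused in this log, $2: "message|object" text
    {
        clcycle clavailability.log > /dev/null 2>&1
        typeset DATE=$(cltime)
        echo "|$DATE|INFO: $2" >> /var/hacmp/availability/clavailability.log
    }

The resulting records are the "|2023-01-28T17:10:42...|INFO: Activating Filesystem|..." lines visible throughout this trace.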
+epprd_rg:cl_activate_fs(2.642):/oracle/EPP/origlogA[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.642):/oracle/EPP/origlogA[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.642):/oracle/EPP/origlogA[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.642):/oracle/EPP/origlogA[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.642):/oracle/EPP/origlogA[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.642):/oracle/EPP/origlogA[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(2.517):/oracle/EPP/origlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.12 \n\t lvname = origlogAlv \n\t label = /oracle/EPP/origlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:48 2022\n \t time modified = Sat Dec 17 14:48:35 2022\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.642):/oracle/EPP/origlogA[fs_mount:249] chfs -a mountguard=yes /oracle/EPP/origlogA +epprd_rg:cl_activate_fs(2.643):/oracle/EPP/origlogA[fs_mount:249] CLUSTER_OVERRIDE=yes /oracle/EPP/origlogA is now guarded against concurrent mounts. +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogA[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/origlogB +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[activate_fs_process_group:540] fs_mount /oracle/EPP/origlogB fsck epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[fs_mount:69] FS=/oracle/EPP/origlogB +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(2.789):/oracle/EPP/origlogB[fs_mount:78] typeset -i RC 
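The consistency check that follows for /oracle/EPP/origlogB compares the mount point as recorded in three places: /etc/filesystems via lsfs, the on-disk LVCB via getlvcb, and CuAt in the ODM via clodmget. Condensed to a ksh sketch (commands and field handling as in the trace; the /dev/ prefix strip is an assumption):

    FS=/oracle/EPP/origlogB                   # example mount point
    lsfs -c $FS 2>&1 | tail -1 | IFS=: read skip LV_dev_name vfs_type rest
    LV_name=${LV_dev_name#/dev/}              # e.g. origlogBlv
    getlvcb -T -A $LV_name 2>&1 | grep -w 'label =' | read skip skip LVCB_label
    CuAt_label=$(clodmget -q "name = $LV_name and attribute = label" -f value -n CuAt)
    # All three should name the same mount point; mismatches were already
    # handled on a best-effort basis by clvaryonvg, so only report here.
    [[ $LVCB_label != "$FS" || $CuAt_label != "$FS" ]] && echo "label mismatch: $FS"

In this log all three sources agree for every filesystem, so both inequality tests at fs_mount:123 and fs_mount:128 fall through.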
+epprd_rg:cl_activate_fs(2.790):/oracle/EPP/origlogB[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/origlogB +epprd_rg:cl_activate_fs(2.790):/oracle/EPP/origlogB[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(2.790):/oracle/EPP/origlogB[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(2.790):/oracle/EPP/origlogB[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(2.790):/oracle/EPP/origlogB[fs_mount:86] lsfs -c /oracle/EPP/origlogB +epprd_rg:cl_activate_fs(2.790):/oracle/EPP/origlogB[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(2.795):/oracle/EPP/origlogB[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(2.791):/oracle/EPP/origlogB[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogB:/dev/origlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(2.795):/oracle/EPP/origlogB[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(2.795):/oracle/EPP/origlogB[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.796):/oracle/EPP/origlogB[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(2.791):/oracle/EPP/origlogB[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogB:/dev/origlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(2.798):/oracle/EPP/origlogB[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(2.800):/oracle/EPP/origlogB[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(2.800):/oracle/EPP/origlogB[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(2.800):/oracle/EPP/origlogB[fs_mount:100] LV_name=origlogBlv +epprd_rg:cl_activate_fs(2.801):/oracle/EPP/origlogB[fs_mount:101] getlvcb -T -A origlogBlv +epprd_rg:cl_activate_fs(2.801):/oracle/EPP/origlogB[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.818):/oracle/EPP/origlogB[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(2.802):/oracle/EPP/origlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.13 \n\t lvname = origlogBlv \n\t label = /oracle/EPP/origlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Dec 17 14:48:35 2022\n ' +epprd_rg:cl_activate_fs(2.818):/oracle/EPP/origlogB[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.818):/oracle/EPP/origlogB[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.819):/oracle/EPP/origlogB[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(2.802):/oracle/EPP/origlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.13 \n\t lvname = origlogBlv \n\t label = /oracle/EPP/origlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Dec 17 14:48:35 2022\n ' +epprd_rg:cl_activate_fs(2.821):/oracle/EPP/origlogB[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.823):/oracle/EPP/origlogB[fs_mount:114] read skip skip LVCB_label 
+epprd_rg:cl_activate_fs(2.823):/oracle/EPP/origlogB[fs_mount:115] clodmget -q 'name = origlogBlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(2.826):/oracle/EPP/origlogB[fs_mount:115] CuAt_label=/oracle/EPP/origlogB +epprd_rg:cl_activate_fs(2.826):/oracle/EPP/origlogB[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/origlogB from /etc/filesystems +epprd_rg:cl_activate_fs(2.826):/oracle/EPP/origlogB[fs_mount:119] : should match /oracle/EPP/origlogB from CuAt ODM and /oracle/EPP/origlogB from the LVCB +epprd_rg:cl_activate_fs(2.826):/oracle/EPP/origlogB[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.826):/oracle/EPP/origlogB[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(2.826):/oracle/EPP/origlogB[fs_mount:123] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:cl_activate_fs(2.827):/oracle/EPP/origlogB[fs_mount:128] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:cl_activate_fs(2.827):/oracle/EPP/origlogB[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.827):/oracle/EPP/origlogB[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.827):/oracle/EPP/origlogB[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.846):/oracle/EPP/origlogB[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.846):/oracle/EPP/origlogB[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.846):/oracle/EPP/origlogB[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_activate_fs(2.846):/oracle/EPP/origlogB[amlog_trace:318] clcycle 
clavailability.log +epprd_rg:cl_activate_fs(2.847):/oracle/EPP/origlogB[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.872):/oracle/EPP/origlogB[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.875):/oracle/EPP/origlogB[amlog_trace:319] DATE=2023-01-28T17:10:42.810288 +epprd_rg:cl_activate_fs(2.875):/oracle/EPP/origlogB[amlog_trace:320] echo '|2023-01-28T17:10:42.810288|INFO: Activating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_activate_fs(2.875):/oracle/EPP/origlogB[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.875):/oracle/EPP/origlogB[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.877):/oracle/EPP/origlogB[fs_mount:162] : Try to mount filesystem /oracle/EPP/origlogB at Jan 28 17:10:42.000 +epprd_rg:cl_activate_fs(2.877):/oracle/EPP/origlogB[fs_mount:163] mount /oracle/EPP/origlogB +epprd_rg:cl_activate_fs(2.888):/oracle/EPP/origlogB[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.888):/oracle/EPP/origlogB[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.888):/oracle/EPP/origlogB[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(2.888):/oracle/EPP/origlogB[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/origlogB' +epprd_rg:cl_activate_fs(2.888):/oracle/EPP/origlogB[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.889):/oracle/EPP/origlogB[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.914):/oracle/EPP/origlogB[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.917):/oracle/EPP/origlogB[amlog_trace:319] DATE=2023-01-28T17:10:42.852850 +epprd_rg:cl_activate_fs(2.917):/oracle/EPP/origlogB[amlog_trace:320] echo '|2023-01-28T17:10:42.852850|INFO: Activating Filesystems completed|/oracle/EPP/origlogB' +epprd_rg:cl_activate_fs(2.917):/oracle/EPP/origlogB[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.917):/oracle/EPP/origlogB[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.917):/oracle/EPP/origlogB[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(2.917):/oracle/EPP/origlogB[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.917):/oracle/EPP/origlogB[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.917):/oracle/EPP/origlogB[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.917):/oracle/EPP/origlogB[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.917):/oracle/EPP/origlogB[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.917):/oracle/EPP/origlogB[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.917):/oracle/EPP/origlogB[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.917):/oracle/EPP/origlogB[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.918):/oracle/EPP/origlogB[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.921):/oracle/EPP/origlogB[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.923):/oracle/EPP/origlogB[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.923):/oracle/EPP/origlogB[fs_mount:236] IFS=. 
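Once the AIX level qualifies (at least 7.1.1.0 on AIX 7, per the comparison above), mountguard is engaged at most once per filesystem: chfs changes the volume group timestamp, so the call is skipped whenever the LVCB already records mountguard=yes. In sketch form, using the variables from the trace:

    if [[ $LVCB_info != *mountguard=yes* ]]
    then
        # CLUSTER_OVERRIDE=yes accompanies chfs in the trace, letting the
        # change proceed on a cluster-managed filesystem.
        CLUSTER_OVERRIDE=yes chfs -a mountguard=yes $FS
    fi

The "is now guarded against concurrent mounts" messages are chfs confirming that JFS2 will refuse a second, concurrent mount of the same filesystem from another node.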
+epprd_rg:cl_activate_fs(2.923):/oracle/EPP/origlogB[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.923):/oracle/EPP/origlogB[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.923):/oracle/EPP/origlogB[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.923):/oracle/EPP/origlogB[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.923):/oracle/EPP/origlogB[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.923):/oracle/EPP/origlogB[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(2.802):/oracle/EPP/origlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.13 \n\t lvname = origlogBlv \n\t label = /oracle/EPP/origlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Dec 17 14:48:35 2022\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.923):/oracle/EPP/origlogB[fs_mount:249] chfs -a mountguard=yes /oracle/EPP/origlogB +epprd_rg:cl_activate_fs(2.924):/oracle/EPP/origlogB[fs_mount:249] CLUSTER_OVERRIDE=yes /oracle/EPP/origlogB is now guarded against concurrent mounts. +epprd_rg:cl_activate_fs(3.071):/oracle/EPP/origlogB[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(3.071):/oracle/EPP/sapdata1[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(3.071):/oracle/EPP/sapdata1[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(3.071):/oracle/EPP/sapdata1[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(3.071):/oracle/EPP/sapdata1[activate_fs_process_group:540] fs_mount /oracle/EPP/sapdata1 fsck epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(3.071):/oracle/EPP/sapdata1[fs_mount:69] FS=/oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(3.071):/oracle/EPP/sapdata1[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(3.071):/oracle/EPP/sapdata1[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(3.072):/oracle/EPP/sapdata1[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(3.072):/oracle/EPP/sapdata1[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(3.072):/oracle/EPP/sapdata1[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(3.072):/oracle/EPP/sapdata1[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(3.072):/oracle/EPP/sapdata1[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(3.072):/oracle/EPP/sapdata1[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(3.072):/oracle/EPP/sapdata1[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(3.072):/oracle/EPP/sapdata1[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(3.072):/oracle/EPP/sapdata1[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(3.072):/oracle/EPP/sapdata1[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(3.072):/oracle/EPP/sapdata1[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(3.072):/oracle/EPP/sapdata1[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(3.072):/oracle/EPP/sapdata1[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(3.072):/oracle/EPP/sapdata1[fs_mount:78] typeset -i RC 
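The clwparroot interludes repeated above all resolve the same way: loadWparName finds no WPAR_NAME resource for epprd_rg in HACMPresource, clwparroot prints nothing and exits 0, and every mount therefore happens in the global AIX environment. Reduced to a sketch (logic condensed from the trace):

    # fs_mount asks clwparroot for the resource group's WPAR root; an
    # empty answer, as throughout this log, means the global environment.
    WPAR_ROOT=$(clwparroot epprd_rg)
    if [[ -n $WPAR_ROOT ]]
    then
        : # the filesystem would instead be mounted under the WPAR's root
    fi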
+epprd_rg:cl_activate_fs(3.072):/oracle/EPP/sapdata1[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(3.072):/oracle/EPP/sapdata1[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(3.072):/oracle/EPP/sapdata1[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(3.072):/oracle/EPP/sapdata1[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(3.072):/oracle/EPP/sapdata1[fs_mount:86] lsfs -c /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(3.073):/oracle/EPP/sapdata1[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(3.077):/oracle/EPP/sapdata1[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(3.073):/oracle/EPP/sapdata1[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata1:/dev/sapdata1lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(3.077):/oracle/EPP/sapdata1[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(3.077):/oracle/EPP/sapdata1[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(3.078):/oracle/EPP/sapdata1[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(3.073):/oracle/EPP/sapdata1[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata1:/dev/sapdata1lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(3.080):/oracle/EPP/sapdata1[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(3.082):/oracle/EPP/sapdata1[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(3.082):/oracle/EPP/sapdata1[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(3.082):/oracle/EPP/sapdata1[fs_mount:100] LV_name=sapdata1lv +epprd_rg:cl_activate_fs(3.082):/oracle/EPP/sapdata1[fs_mount:101] getlvcb -T -A sapdata1lv +epprd_rg:cl_activate_fs(3.083):/oracle/EPP/sapdata1[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(3.100):/oracle/EPP/sapdata1[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(3.083):/oracle/EPP/sapdata1[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.7 \n\t lvname = sapdata1lv \n\t label = /oracle/EPP/sapdata1 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:44 2022\n \t time modified = Sat Dec 17 14:48:31 2022\n ' +epprd_rg:cl_activate_fs(3.100):/oracle/EPP/sapdata1[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(3.100):/oracle/EPP/sapdata1[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(3.101):/oracle/EPP/sapdata1[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(3.083):/oracle/EPP/sapdata1[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.7 \n\t lvname = sapdata1lv \n\t label = /oracle/EPP/sapdata1 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:44 2022\n \t time modified = Sat Dec 17 14:48:31 2022\n ' +epprd_rg:cl_activate_fs(3.103):/oracle/EPP/sapdata1[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(3.105):/oracle/EPP/sapdata1[fs_mount:114] read skip skip LVCB_label 
+epprd_rg:cl_activate_fs(3.105):/oracle/EPP/sapdata1[fs_mount:115] clodmget -q 'name = sapdata1lv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(3.108):/oracle/EPP/sapdata1[fs_mount:115] CuAt_label=/oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(3.108):/oracle/EPP/sapdata1[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/sapdata1 from /etc/filesystems +epprd_rg:cl_activate_fs(3.108):/oracle/EPP/sapdata1[fs_mount:119] : should match /oracle/EPP/sapdata1 from CuAt ODM and /oracle/EPP/sapdata1 from the LVCB +epprd_rg:cl_activate_fs(3.108):/oracle/EPP/sapdata1[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(3.108):/oracle/EPP/sapdata1[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(3.108):/oracle/EPP/sapdata1[fs_mount:123] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:cl_activate_fs(3.108):/oracle/EPP/sapdata1[fs_mount:128] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:cl_activate_fs(3.108):/oracle/EPP/sapdata1[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(3.108):/oracle/EPP/sapdata1[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(3.108):/oracle/EPP/sapdata1[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(3.128):/oracle/EPP/sapdata1[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(3.128):/oracle/EPP/sapdata1[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(3.128):/oracle/EPP/sapdata1[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_activate_fs(3.128):/oracle/EPP/sapdata1[amlog_trace:318] clcycle 
clavailability.log +epprd_rg:cl_activate_fs(3.129):/oracle/EPP/sapdata1[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(3.154):/oracle/EPP/sapdata1[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(3.156):/oracle/EPP/sapdata1[amlog_trace:319] DATE=2023-01-28T17:10:43.092235 +epprd_rg:cl_activate_fs(3.157):/oracle/EPP/sapdata1[amlog_trace:320] echo '|2023-01-28T17:10:43.092235|INFO: Activating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_activate_fs(3.157):/oracle/EPP/sapdata1[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(3.157):/oracle/EPP/sapdata1[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(3.159):/oracle/EPP/sapdata1[fs_mount:162] : Try to mount filesystem /oracle/EPP/sapdata1 at Jan 28 17:10:43.000 +epprd_rg:cl_activate_fs(3.159):/oracle/EPP/sapdata1[fs_mount:163] mount /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(3.170):/oracle/EPP/sapdata1[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(3.170):/oracle/EPP/sapdata1[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(3.170):/oracle/EPP/sapdata1[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(3.171):/oracle/EPP/sapdata1[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/sapdata1' +epprd_rg:cl_activate_fs(3.171):/oracle/EPP/sapdata1[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(3.171):/oracle/EPP/sapdata1[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(3.197):/oracle/EPP/sapdata1[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(3.200):/oracle/EPP/sapdata1[amlog_trace:319] DATE=2023-01-28T17:10:43.135400 +epprd_rg:cl_activate_fs(3.200):/oracle/EPP/sapdata1[amlog_trace:320] echo '|2023-01-28T17:10:43.135400|INFO: Activating Filesystems completed|/oracle/EPP/sapdata1' +epprd_rg:cl_activate_fs(3.200):/oracle/EPP/sapdata1[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(3.200):/oracle/EPP/sapdata1[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(3.200):/oracle/EPP/sapdata1[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(3.200):/oracle/EPP/sapdata1[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(3.200):/oracle/EPP/sapdata1[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(3.200):/oracle/EPP/sapdata1[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(3.200):/oracle/EPP/sapdata1[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(3.200):/oracle/EPP/sapdata1[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(3.200):/oracle/EPP/sapdata1[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(3.200):/oracle/EPP/sapdata1[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(3.200):/oracle/EPP/sapdata1[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(3.201):/oracle/EPP/sapdata1[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(3.204):/oracle/EPP/sapdata1[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(3.206):/oracle/EPP/sapdata1[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(3.206):/oracle/EPP/sapdata1[fs_mount:236] IFS=. 
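Every mount in this log returns 0, so the (( 0 == 1 )) test at fs_mount:209 never fires. The failure path it guards is only inferred here: fs_mount is invoked with TOOL=fsck and a shared TMP_FILENAME, which suggests failed mounts are recorded so the caller can fsck and retry them. A speculative sketch, with the file handling assumed rather than shown in the trace:

    if ! mount $FS
    then
        # Assumed recovery hook: remember the filesystem for an
        # fsck-and-retry pass; $TMP_FILENAME's location is not shown.
        echo $FS >> $TMP_FILENAME
        STATUS=1
    fi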
+epprd_rg:cl_activate_fs(3.206):/oracle/EPP/sapdata1[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(3.206):/oracle/EPP/sapdata1[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(3.206):/oracle/EPP/sapdata1[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(3.206):/oracle/EPP/sapdata1[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(3.206):/oracle/EPP/sapdata1[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(3.206):/oracle/EPP/sapdata1[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(3.083):/oracle/EPP/sapdata1[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.7 \n\t lvname = sapdata1lv \n\t label = /oracle/EPP/sapdata1 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:44 2022\n \t time modified = Sat Dec 17 14:48:31 2022\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(3.206):/oracle/EPP/sapdata1[fs_mount:249] chfs -a mountguard=yes /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(3.207):/oracle/EPP/sapdata1[fs_mount:249] CLUSTER_OVERRIDE=yes /oracle/EPP/sapdata1 is now guarded against concurrent mounts. +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata1[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[activate_fs_process_group:540] fs_mount /oracle/EPP/sapdata2 fsck epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:69] FS=/oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:78] typeset -i RC 
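Before each mount, activate_fs_process_group tests [[ sequential == parallel ]]: the recovery method for this resource group is sequential, so fs_mount runs in the foreground one filesystem at a time, which is why the mounts in this log appear strictly in series. A sketch of that dispatch (the variable name is assumed):

    if [[ $RECOVERY_METHOD == parallel ]]
    then
        fs_mount $FS fsck $TMP_FILENAME &   # background, reaped later
    else
        fs_mount $FS fsck $TMP_FILENAME     # serial, as in this log
    fi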
+epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(3.354):/oracle/EPP/sapdata2[fs_mount:86] lsfs -c /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(3.355):/oracle/EPP/sapdata2[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(3.360):/oracle/EPP/sapdata2[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(3.355):/oracle/EPP/sapdata2[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata2:/dev/sapdata2lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(3.360):/oracle/EPP/sapdata2[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(3.360):/oracle/EPP/sapdata2[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(3.361):/oracle/EPP/sapdata2[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(3.355):/oracle/EPP/sapdata2[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata2:/dev/sapdata2lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(3.363):/oracle/EPP/sapdata2[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(3.365):/oracle/EPP/sapdata2[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(3.365):/oracle/EPP/sapdata2[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(3.365):/oracle/EPP/sapdata2[fs_mount:100] LV_name=sapdata2lv +epprd_rg:cl_activate_fs(3.365):/oracle/EPP/sapdata2[fs_mount:101] getlvcb -T -A sapdata2lv +epprd_rg:cl_activate_fs(3.366):/oracle/EPP/sapdata2[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(3.382):/oracle/EPP/sapdata2[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(3.366):/oracle/EPP/sapdata2[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.8 \n\t lvname = sapdata2lv \n\t label = /oracle/EPP/sapdata2 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:45 2022\n \t time modified = Sat Dec 17 14:48:32 2022\n ' +epprd_rg:cl_activate_fs(3.382):/oracle/EPP/sapdata2[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(3.382):/oracle/EPP/sapdata2[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(3.383):/oracle/EPP/sapdata2[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(3.366):/oracle/EPP/sapdata2[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.8 \n\t lvname = sapdata2lv \n\t label = /oracle/EPP/sapdata2 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:45 2022\n \t time modified = Sat Dec 17 14:48:32 2022\n ' +epprd_rg:cl_activate_fs(3.385):/oracle/EPP/sapdata2[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(3.387):/oracle/EPP/sapdata2[fs_mount:114] read skip skip LVCB_label 
+epprd_rg:cl_activate_fs(3.388):/oracle/EPP/sapdata2[fs_mount:115] clodmget -q 'name = sapdata2lv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(3.391):/oracle/EPP/sapdata2[fs_mount:115] CuAt_label=/oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(3.391):/oracle/EPP/sapdata2[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/sapdata2 from /etc/filesystems +epprd_rg:cl_activate_fs(3.391):/oracle/EPP/sapdata2[fs_mount:119] : should match /oracle/EPP/sapdata2 from CuAt ODM and /oracle/EPP/sapdata2 from the LVCB +epprd_rg:cl_activate_fs(3.391):/oracle/EPP/sapdata2[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(3.391):/oracle/EPP/sapdata2[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(3.391):/oracle/EPP/sapdata2[fs_mount:123] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:cl_activate_fs(3.391):/oracle/EPP/sapdata2[fs_mount:128] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:cl_activate_fs(3.391):/oracle/EPP/sapdata2[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(3.391):/oracle/EPP/sapdata2[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(3.391):/oracle/EPP/sapdata2[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(3.411):/oracle/EPP/sapdata2[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(3.411):/oracle/EPP/sapdata2[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(3.411):/oracle/EPP/sapdata2[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_activate_fs(3.411):/oracle/EPP/sapdata2[amlog_trace:318] clcycle clavailability.log
+epprd_rg:cl_activate_fs(3.411):/oracle/EPP/sapdata2[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(3.437):/oracle/EPP/sapdata2[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(3.440):/oracle/EPP/sapdata2[amlog_trace:319] DATE=2023-01-28T17:10:43.375387 +epprd_rg:cl_activate_fs(3.440):/oracle/EPP/sapdata2[amlog_trace:320] echo '|2023-01-28T17:10:43.375387|INFO: Activating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_activate_fs(3.440):/oracle/EPP/sapdata2[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(3.440):/oracle/EPP/sapdata2[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(3.442):/oracle/EPP/sapdata2[fs_mount:162] : Try to mount filesystem /oracle/EPP/sapdata2 at Jan 28 17:10:43.000 +epprd_rg:cl_activate_fs(3.442):/oracle/EPP/sapdata2[fs_mount:163] mount /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(3.453):/oracle/EPP/sapdata2[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(3.453):/oracle/EPP/sapdata2[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(3.453):/oracle/EPP/sapdata2[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(3.453):/oracle/EPP/sapdata2[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/sapdata2' +epprd_rg:cl_activate_fs(3.453):/oracle/EPP/sapdata2[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(3.454):/oracle/EPP/sapdata2[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(3.480):/oracle/EPP/sapdata2[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(3.483):/oracle/EPP/sapdata2[amlog_trace:319] DATE=2023-01-28T17:10:43.418289 +epprd_rg:cl_activate_fs(3.483):/oracle/EPP/sapdata2[amlog_trace:320] echo '|2023-01-28T17:10:43.418289|INFO: Activating Filesystems completed|/oracle/EPP/sapdata2' +epprd_rg:cl_activate_fs(3.483):/oracle/EPP/sapdata2[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(3.483):/oracle/EPP/sapdata2[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(3.483):/oracle/EPP/sapdata2[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(3.483):/oracle/EPP/sapdata2[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(3.483):/oracle/EPP/sapdata2[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(3.483):/oracle/EPP/sapdata2[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(3.483):/oracle/EPP/sapdata2[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(3.483):/oracle/EPP/sapdata2[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(3.483):/oracle/EPP/sapdata2[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(3.483):/oracle/EPP/sapdata2[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(3.483):/oracle/EPP/sapdata2[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(3.484):/oracle/EPP/sapdata2[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(3.487):/oracle/EPP/sapdata2[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(3.489):/oracle/EPP/sapdata2[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(3.489):/oracle/EPP/sapdata2[fs_mount:236] IFS=.
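Each mount is bracketed by a pair of amlog_trace calls, as traced above: clcycle rotates /var/hacmp/availability/clavailability.log, cltime supplies a microsecond timestamp, and a pipe-delimited record is appended before and after the mount attempt. An illustrative stand-in for that pattern (amlog_note is a hypothetical name; the real helper is PowerHA's amlog_trace with its clcycle and cltime plumbing):

    AMLOG=/var/hacmp/availability/clavailability.log
    amlog_note() {
        typeset stamp
        stamp=$(date '+%Y-%m-%dT%H:%M:%S')       # cltime also carries microseconds
        print -- "|$stamp|INFO: $1" >> $AMLOG    # same record shape as the trace
    }
    amlog_note 'Activating Filesystem|/oracle/EPP/sapdata2'
    mount /oracle/EPP/sapdata2 &&
        amlog_note 'Activating Filesystems completed|/oracle/EPP/sapdata2'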
+epprd_rg:cl_activate_fs(3.489):/oracle/EPP/sapdata2[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(3.489):/oracle/EPP/sapdata2[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(3.489):/oracle/EPP/sapdata2[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(3.489):/oracle/EPP/sapdata2[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(3.489):/oracle/EPP/sapdata2[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(3.489):/oracle/EPP/sapdata2[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(3.366):/oracle/EPP/sapdata2[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.8 \n\t lvname = sapdata2lv \n\t label = /oracle/EPP/sapdata2 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:45 2022\n \t time modified = Sat Dec 17 14:48:32 2022\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(3.489):/oracle/EPP/sapdata2[fs_mount:249] chfs -a mountguard=yes /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(3.490):/oracle/EPP/sapdata2[fs_mount:249] CLUSTER_OVERRIDE=yes /oracle/EPP/sapdata2 is now guarded against concurrent mounts. +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata2[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[activate_fs_process_group:540] fs_mount /oracle/EPP/sapdata3 fsck epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:69] FS=/oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:78] typeset -i RC 
+epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(3.637):/oracle/EPP/sapdata3[fs_mount:86] lsfs -c /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(3.638):/oracle/EPP/sapdata3[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(3.642):/oracle/EPP/sapdata3[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(3.638):/oracle/EPP/sapdata3[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata3:/dev/sapdata3lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(3.643):/oracle/EPP/sapdata3[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(3.643):/oracle/EPP/sapdata3[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(3.643):/oracle/EPP/sapdata3[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(3.638):/oracle/EPP/sapdata3[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata3:/dev/sapdata3lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(3.645):/oracle/EPP/sapdata3[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(3.647):/oracle/EPP/sapdata3[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(3.647):/oracle/EPP/sapdata3[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(3.647):/oracle/EPP/sapdata3[fs_mount:100] LV_name=sapdata3lv +epprd_rg:cl_activate_fs(3.647):/oracle/EPP/sapdata3[fs_mount:101] getlvcb -T -A sapdata3lv +epprd_rg:cl_activate_fs(3.648):/oracle/EPP/sapdata3[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(3.665):/oracle/EPP/sapdata3[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(3.648):/oracle/EPP/sapdata3[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.9 \n\t lvname = sapdata3lv \n\t label = /oracle/EPP/sapdata3 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Dec 17 14:48:33 2022\n ' +epprd_rg:cl_activate_fs(3.665):/oracle/EPP/sapdata3[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(3.665):/oracle/EPP/sapdata3[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(3.666):/oracle/EPP/sapdata3[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(3.648):/oracle/EPP/sapdata3[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.9 \n\t lvname = sapdata3lv \n\t label = /oracle/EPP/sapdata3 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Dec 17 14:48:33 2022\n ' +epprd_rg:cl_activate_fs(3.668):/oracle/EPP/sapdata3[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(3.670):/oracle/EPP/sapdata3[fs_mount:114] read skip skip LVCB_label 
+epprd_rg:cl_activate_fs(3.670):/oracle/EPP/sapdata3[fs_mount:115] clodmget -q 'name = sapdata3lv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(3.673):/oracle/EPP/sapdata3[fs_mount:115] CuAt_label=/oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(3.673):/oracle/EPP/sapdata3[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/sapdata3 from /etc/filesystems +epprd_rg:cl_activate_fs(3.673):/oracle/EPP/sapdata3[fs_mount:119] : should match /oracle/EPP/sapdata3 from CuAt ODM and /oracle/EPP/sapdata3 from the LVCB +epprd_rg:cl_activate_fs(3.673):/oracle/EPP/sapdata3[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(3.673):/oracle/EPP/sapdata3[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(3.674):/oracle/EPP/sapdata3[fs_mount:123] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:cl_activate_fs(3.674):/oracle/EPP/sapdata3[fs_mount:128] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:cl_activate_fs(3.674):/oracle/EPP/sapdata3[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(3.674):/oracle/EPP/sapdata3[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(3.674):/oracle/EPP/sapdata3[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(3.693):/oracle/EPP/sapdata3[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(3.693):/oracle/EPP/sapdata3[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(3.693):/oracle/EPP/sapdata3[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_activate_fs(3.693):/oracle/EPP/sapdata3[amlog_trace:318] clcycle clavailability.log
+epprd_rg:cl_activate_fs(3.694):/oracle/EPP/sapdata3[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(3.719):/oracle/EPP/sapdata3[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(3.722):/oracle/EPP/sapdata3[amlog_trace:319] DATE=2023-01-28T17:10:43.657441 +epprd_rg:cl_activate_fs(3.722):/oracle/EPP/sapdata3[amlog_trace:320] echo '|2023-01-28T17:10:43.657441|INFO: Activating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_activate_fs(3.722):/oracle/EPP/sapdata3[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(3.722):/oracle/EPP/sapdata3[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(3.724):/oracle/EPP/sapdata3[fs_mount:162] : Try to mount filesystem /oracle/EPP/sapdata3 at Jan 28 17:10:43.000 +epprd_rg:cl_activate_fs(3.724):/oracle/EPP/sapdata3[fs_mount:163] mount /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(3.735):/oracle/EPP/sapdata3[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(3.735):/oracle/EPP/sapdata3[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(3.735):/oracle/EPP/sapdata3[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(3.735):/oracle/EPP/sapdata3[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/sapdata3' +epprd_rg:cl_activate_fs(3.735):/oracle/EPP/sapdata3[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(3.736):/oracle/EPP/sapdata3[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(3.761):/oracle/EPP/sapdata3[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(3.764):/oracle/EPP/sapdata3[amlog_trace:319] DATE=2023-01-28T17:10:43.699899 +epprd_rg:cl_activate_fs(3.764):/oracle/EPP/sapdata3[amlog_trace:320] echo '|2023-01-28T17:10:43.699899|INFO: Activating Filesystems completed|/oracle/EPP/sapdata3' +epprd_rg:cl_activate_fs(3.764):/oracle/EPP/sapdata3[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(3.764):/oracle/EPP/sapdata3[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(3.764):/oracle/EPP/sapdata3[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(3.764):/oracle/EPP/sapdata3[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(3.764):/oracle/EPP/sapdata3[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(3.764):/oracle/EPP/sapdata3[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(3.764):/oracle/EPP/sapdata3[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(3.764):/oracle/EPP/sapdata3[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(3.764):/oracle/EPP/sapdata3[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(3.764):/oracle/EPP/sapdata3[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(3.764):/oracle/EPP/sapdata3[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(3.765):/oracle/EPP/sapdata3[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(3.768):/oracle/EPP/sapdata3[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(3.770):/oracle/EPP/sapdata3[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(3.770):/oracle/EPP/sapdata3[fs_mount:236] IFS=.
+epprd_rg:cl_activate_fs(3.770):/oracle/EPP/sapdata3[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(3.770):/oracle/EPP/sapdata3[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(3.770):/oracle/EPP/sapdata3[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(3.770):/oracle/EPP/sapdata3[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(3.770):/oracle/EPP/sapdata3[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(3.770):/oracle/EPP/sapdata3[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(3.648):/oracle/EPP/sapdata3[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.9 \n\t lvname = sapdata3lv \n\t label = /oracle/EPP/sapdata3 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Dec 17 14:48:33 2022\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(3.770):/oracle/EPP/sapdata3[fs_mount:249] chfs -a mountguard=yes /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(3.771):/oracle/EPP/sapdata3[fs_mount:249] CLUSTER_OVERRIDE=yes /oracle/EPP/sapdata3 is now guarded against concurrent mounts. +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata3[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[activate_fs_process_group:540] fs_mount /oracle/EPP/sapdata4 fsck epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:69] FS=/oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:78] typeset -i RC 
+epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(3.918):/oracle/EPP/sapdata4[fs_mount:86] lsfs -c /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(3.919):/oracle/EPP/sapdata4[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(3.924):/oracle/EPP/sapdata4[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(3.919):/oracle/EPP/sapdata4[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata4:/dev/sapdata4lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(3.924):/oracle/EPP/sapdata4[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(3.924):/oracle/EPP/sapdata4[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(3.925):/oracle/EPP/sapdata4[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(3.919):/oracle/EPP/sapdata4[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata4:/dev/sapdata4lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(3.927):/oracle/EPP/sapdata4[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(3.929):/oracle/EPP/sapdata4[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(3.929):/oracle/EPP/sapdata4[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(3.929):/oracle/EPP/sapdata4[fs_mount:100] LV_name=sapdata4lv +epprd_rg:cl_activate_fs(3.929):/oracle/EPP/sapdata4[fs_mount:101] getlvcb -T -A sapdata4lv +epprd_rg:cl_activate_fs(3.930):/oracle/EPP/sapdata4[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(3.947):/oracle/EPP/sapdata4[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(3.930):/oracle/EPP/sapdata4[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.10 \n\t lvname = sapdata4lv \n\t label = /oracle/EPP/sapdata4 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Dec 17 14:48:34 2022\n ' +epprd_rg:cl_activate_fs(3.947):/oracle/EPP/sapdata4[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(3.947):/oracle/EPP/sapdata4[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(3.948):/oracle/EPP/sapdata4[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(3.930):/oracle/EPP/sapdata4[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.10 \n\t lvname = sapdata4lv \n\t label = /oracle/EPP/sapdata4 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Dec 17 14:48:34 2022\n ' +epprd_rg:cl_activate_fs(3.950):/oracle/EPP/sapdata4[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(3.952):/oracle/EPP/sapdata4[fs_mount:114] read skip skip LVCB_label 
+epprd_rg:cl_activate_fs(3.952):/oracle/EPP/sapdata4[fs_mount:115] clodmget -q 'name = sapdata4lv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(3.955):/oracle/EPP/sapdata4[fs_mount:115] CuAt_label=/oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(3.955):/oracle/EPP/sapdata4[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/sapdata4 from /etc/filesystems +epprd_rg:cl_activate_fs(3.955):/oracle/EPP/sapdata4[fs_mount:119] : should match /oracle/EPP/sapdata4 from CuAt ODM and /oracle/EPP/sapdata4 from the LVCB +epprd_rg:cl_activate_fs(3.955):/oracle/EPP/sapdata4[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(3.955):/oracle/EPP/sapdata4[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(3.955):/oracle/EPP/sapdata4[fs_mount:123] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:cl_activate_fs(3.955):/oracle/EPP/sapdata4[fs_mount:128] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:cl_activate_fs(3.955):/oracle/EPP/sapdata4[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(3.955):/oracle/EPP/sapdata4[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(3.955):/oracle/EPP/sapdata4[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(3.975):/oracle/EPP/sapdata4[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(3.975):/oracle/EPP/sapdata4[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(3.975):/oracle/EPP/sapdata4[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_activate_fs(3.975):/oracle/EPP/sapdata4[amlog_trace:318] clcycle clavailability.log
+epprd_rg:cl_activate_fs(3.976):/oracle/EPP/sapdata4[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(4.001):/oracle/EPP/sapdata4[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(4.004):/oracle/EPP/sapdata4[amlog_trace:319] DATE=2023-01-28T17:10:43.939669 +epprd_rg:cl_activate_fs(4.004):/oracle/EPP/sapdata4[amlog_trace:320] echo '|2023-01-28T17:10:43.939669|INFO: Activating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_activate_fs(4.004):/oracle/EPP/sapdata4[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(4.004):/oracle/EPP/sapdata4[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(4.007):/oracle/EPP/sapdata4[fs_mount:162] : Try to mount filesystem /oracle/EPP/sapdata4 at Jan 28 17:10:43.000 +epprd_rg:cl_activate_fs(4.007):/oracle/EPP/sapdata4[fs_mount:163] mount /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(4.018):/oracle/EPP/sapdata4[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(4.018):/oracle/EPP/sapdata4[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(4.018):/oracle/EPP/sapdata4[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(4.018):/oracle/EPP/sapdata4[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/sapdata4' +epprd_rg:cl_activate_fs(4.018):/oracle/EPP/sapdata4[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(4.018):/oracle/EPP/sapdata4[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(4.044):/oracle/EPP/sapdata4[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(4.047):/oracle/EPP/sapdata4[amlog_trace:319] DATE=2023-01-28T17:10:43.982442 +epprd_rg:cl_activate_fs(4.047):/oracle/EPP/sapdata4[amlog_trace:320] echo '|2023-01-28T17:10:43.982442|INFO: Activating Filesystems completed|/oracle/EPP/sapdata4' +epprd_rg:cl_activate_fs(4.047):/oracle/EPP/sapdata4[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(4.047):/oracle/EPP/sapdata4[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(4.047):/oracle/EPP/sapdata4[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(4.047):/oracle/EPP/sapdata4[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(4.047):/oracle/EPP/sapdata4[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(4.047):/oracle/EPP/sapdata4[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(4.047):/oracle/EPP/sapdata4[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(4.047):/oracle/EPP/sapdata4[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(4.047):/oracle/EPP/sapdata4[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(4.047):/oracle/EPP/sapdata4[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(4.047):/oracle/EPP/sapdata4[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(4.048):/oracle/EPP/sapdata4[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(4.051):/oracle/EPP/sapdata4[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(4.053):/oracle/EPP/sapdata4[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(4.053):/oracle/EPP/sapdata4[fs_mount:236] IFS=.
+epprd_rg:cl_activate_fs(4.053):/oracle/EPP/sapdata4[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(4.053):/oracle/EPP/sapdata4[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(4.053):/oracle/EPP/sapdata4[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(4.053):/oracle/EPP/sapdata4[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(4.053):/oracle/EPP/sapdata4[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(4.053):/oracle/EPP/sapdata4[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(3.930):/oracle/EPP/sapdata4[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.10 \n\t lvname = sapdata4lv \n\t label = /oracle/EPP/sapdata4 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Dec 17 14:48:34 2022\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(4.053):/oracle/EPP/sapdata4[fs_mount:249] chfs -a mountguard=yes /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(4.054):/oracle/EPP/sapdata4[fs_mount:249] CLUSTER_OVERRIDE=yes /oracle/EPP/sapdata4 is now guarded against concurrent mounts. +epprd_rg:cl_activate_fs(4.200):/oracle/EPP/sapdata4[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(4.200):/sapmnt[activate_fs_process_group:527] PS4_LOOP=/sapmnt +epprd_rg:cl_activate_fs(4.200):/sapmnt[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(4.200):/sapmnt[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(4.200):/sapmnt[activate_fs_process_group:540] fs_mount /sapmnt fsck epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:69] FS=/sapmnt +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:81] : Here check to see if the information in /etc/filesystems for /sapmnt +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:83] : the label field for the logical volume should match the mount 
+epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(4.200):/sapmnt[fs_mount:86] lsfs -c /sapmnt +epprd_rg:cl_activate_fs(4.201):/sapmnt[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(4.206):/sapmnt[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(4.201):/sapmnt[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/sapmnt:/dev/sapmntlv:jfs2:::20971520:rw:no:no' +epprd_rg:cl_activate_fs(4.206):/sapmnt[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(4.206):/sapmnt[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(4.207):/sapmnt[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(4.201):/sapmnt[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/sapmnt:/dev/sapmntlv:jfs2:::20971520:rw:no:no' +epprd_rg:cl_activate_fs(4.209):/sapmnt[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(4.211):/sapmnt[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(4.211):/sapmnt[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(4.211):/sapmnt[fs_mount:100] LV_name=sapmntlv +epprd_rg:cl_activate_fs(4.211):/sapmnt[fs_mount:101] getlvcb -T -A sapmntlv +epprd_rg:cl_activate_fs(4.212):/sapmnt[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(4.228):/sapmnt[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(4.212):/sapmnt[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.3 \n\t lvname = sapmntlv \n\t label = /sapmnt \n\t machine id = 44AF14B00 \n\t number lps = 20 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:41 2022\n \t time modified = Sat Dec 17 14:48:08 2022\n ' +epprd_rg:cl_activate_fs(4.228):/sapmnt[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(4.228):/sapmnt[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(4.229):/sapmnt[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(4.212):/sapmnt[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.3 \n\t lvname = sapmntlv \n\t label = /sapmnt \n\t machine id = 44AF14B00 \n\t number lps = 20 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:41 2022\n \t time modified = Sat Dec 17 14:48:08 2022\n ' +epprd_rg:cl_activate_fs(4.231):/sapmnt[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(4.233):/sapmnt[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(4.233):/sapmnt[fs_mount:115] clodmget -q 'name = sapmntlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(4.236):/sapmnt[fs_mount:115] CuAt_label=/sapmnt +epprd_rg:cl_activate_fs(4.236):/sapmnt[fs_mount:118] : At this point, if things are working correctly, /sapmnt from /etc/filesystems +epprd_rg:cl_activate_fs(4.236):/sapmnt[fs_mount:119] : should match /sapmnt from CuAt ODM and /sapmnt from the LVCB +epprd_rg:cl_activate_fs(4.236):/sapmnt[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(4.236):/sapmnt[fs_mount:121] : were done in clvaryonvg. 
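The clwparroot call traced next decides where /sapmnt gets mounted: loadWparName asks the HACMPresource ODM class for a WPAR_NAME resource, and the empty answer seen throughout this log means the resource group is not WPAR-based, so WPAR_ROOT stays empty and the filesystem is mounted at the global path. The lookup itself, sketched with the clodmget flags copied from the trace (assumes a node with PowerHA installed):

    wparName=$(clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource)
    if [[ -z $wparName ]]; then
        WPAR_ROOT=''     # no WPAR configured: mount at the global path
    else
        print "resource group runs in WPAR $wparName"
    fi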
+epprd_rg:cl_activate_fs(4.236):/sapmnt[fs_mount:123] [[ /sapmnt != /sapmnt ]] +epprd_rg:cl_activate_fs(4.236):/sapmnt[fs_mount:128] [[ /sapmnt != /sapmnt ]] +epprd_rg:cl_activate_fs(4.236):/sapmnt[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(4.236):/sapmnt[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(4.236):/sapmnt[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(4.256):/sapmnt[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(4.256):/sapmnt[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(4.256):/sapmnt[fs_mount:160] amlog_trace '' 'Activating Filesystem|/sapmnt' +epprd_rg:cl_activate_fs(4.256):/sapmnt[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(4.257):/sapmnt[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(4.282):/sapmnt[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(4.284):/sapmnt[amlog_trace:319] DATE=2023-01-28T17:10:44.220211 +epprd_rg:cl_activate_fs(4.284):/sapmnt[amlog_trace:320] echo '|2023-01-28T17:10:44.220211|INFO: Activating Filesystem|/sapmnt' +epprd_rg:cl_activate_fs(4.285):/sapmnt[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(4.285):/sapmnt[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(4.287):/sapmnt[fs_mount:162] : Try to mount filesystem /sapmnt at Jan 28 17:10:44.000 +epprd_rg:cl_activate_fs(4.287):/sapmnt[fs_mount:163] mount /sapmnt +epprd_rg:cl_activate_fs(4.298):/sapmnt[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(4.298):/sapmnt[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(4.298):/sapmnt[fs_mount:220] : if we are running on an AIX level that suppors it
+epprd_rg:cl_activate_fs(4.298):/sapmnt[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/sapmnt' +epprd_rg:cl_activate_fs(4.298):/sapmnt[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(4.299):/sapmnt[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(4.324):/sapmnt[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(4.327):/sapmnt[amlog_trace:319] DATE=2023-01-28T17:10:44.262684 +epprd_rg:cl_activate_fs(4.327):/sapmnt[amlog_trace:320] echo '|2023-01-28T17:10:44.262684|INFO: Activating Filesystems completed|/sapmnt' +epprd_rg:cl_activate_fs(4.327):/sapmnt[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(4.327):/sapmnt[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(4.327):/sapmnt[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(4.327):/sapmnt[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(4.327):/sapmnt[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(4.327):/sapmnt[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(4.327):/sapmnt[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(4.327):/sapmnt[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(4.327):/sapmnt[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(4.327):/sapmnt[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(4.327):/sapmnt[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(4.328):/sapmnt[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(4.331):/sapmnt[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(4.333):/sapmnt[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(4.333):/sapmnt[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(4.333):/sapmnt[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(4.333):/sapmnt[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(4.333):/sapmnt[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(4.333):/sapmnt[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(4.333):/sapmnt[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(4.333):/sapmnt[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(4.212):/sapmnt[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.3 \n\t lvname = sapmntlv \n\t label = /sapmnt \n\t machine id = 44AF14B00 \n\t number lps = 20 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:41 2022\n \t time modified = Sat Dec 17 14:48:08 2022\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(4.333):/sapmnt[fs_mount:249] chfs -a mountguard=yes /sapmnt +epprd_rg:cl_activate_fs(4.334):/sapmnt[fs_mount:249] CLUSTER_OVERRIDE=yes /sapmnt is now guarded against concurrent mounts.
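Note that chfs -a mountguard=yes only runs because the LVCB captured earlier lacks a mountguard=yes token: the attribute change rewrites the volume group timestamp, so fs_mount deliberately sets it once per filesystem rather than on every event. The same run-once guard, sketched standalone with the commands seen in the trace (assumes ksh93 and a mounted JFS2 filesystem):

    FS=/sapmnt
    LV=sapmntlv
    LVCB_info=$(getlvcb -T -A $LV 2>&1)
    if [[ $LVCB_info != *mountguard=yes* ]]; then
        chfs -a mountguard=yes $FS    # bumps the VG timestamp, so do it once
    fi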
+epprd_rg:cl_activate_fs(4.480):/sapmnt[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(4.480):/usr/sap[activate_fs_process_group:527] PS4_LOOP=/usr/sap +epprd_rg:cl_activate_fs(4.480):/usr/sap[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(4.480):/usr/sap[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(4.480):/usr/sap[activate_fs_process_group:540] fs_mount /usr/sap fsck epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(4.480):/usr/sap[fs_mount:69] FS=/usr/sap +epprd_rg:cl_activate_fs(4.480):/usr/sap[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(4.480):/usr/sap[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(4.480):/usr/sap[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(4.480):/usr/sap[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739098 +epprd_rg:cl_activate_fs(4.480):/usr/sap[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(4.480):/usr/sap[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(4.480):/usr/sap[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(4.480):/usr/sap[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(4.480):/usr/sap[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(4.480):/usr/sap[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(4.480):/usr/sap[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(4.480):/usr/sap[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(4.480):/usr/sap[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(4.480):/usr/sap[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(4.480):/usr/sap[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(4.480):/usr/sap[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(4.480):/usr/sap[fs_mount:81] : Here check to see if the information in /etc/filesystems for /usr/sap +epprd_rg:cl_activate_fs(4.480):/usr/sap[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(4.481):/usr/sap[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(4.481):/usr/sap[fs_mount:84] : point in /etc/filesystems. 
+epprd_rg:cl_activate_fs(4.481):/usr/sap[fs_mount:86] lsfs -c /usr/sap +epprd_rg:cl_activate_fs(4.481):/usr/sap[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(4.486):/usr/sap[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(4.482):/usr/sap[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/usr/sap:/dev/saplv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(4.486):/usr/sap[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(4.486):/usr/sap[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(4.487):/usr/sap[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(4.482):/usr/sap[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/usr/sap:/dev/saplv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(4.489):/usr/sap[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(4.491):/usr/sap[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(4.491):/usr/sap[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(4.491):/usr/sap[fs_mount:100] LV_name=saplv +epprd_rg:cl_activate_fs(4.491):/usr/sap[fs_mount:101] getlvcb -T -A saplv +epprd_rg:cl_activate_fs(4.492):/usr/sap[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(4.509):/usr/sap[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(4.492):/usr/sap[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.2 \n\t lvname = saplv \n\t label = /usr/sap \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:37 2022\n \t time modified = Sat Dec 17 14:48:05 2022\n ' +epprd_rg:cl_activate_fs(4.509):/usr/sap[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(4.509):/usr/sap[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(4.510):/usr/sap[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(4.492):/usr/sap[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.2 \n\t lvname = saplv \n\t label = /usr/sap \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:37 2022\n \t time modified = Sat Dec 17 14:48:05 2022\n ' +epprd_rg:cl_activate_fs(4.512):/usr/sap[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(4.513):/usr/sap[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(4.514):/usr/sap[fs_mount:115] clodmget -q 'name = saplv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(4.517):/usr/sap[fs_mount:115] CuAt_label=/usr/sap +epprd_rg:cl_activate_fs(4.517):/usr/sap[fs_mount:118] : At this point, if things are working correctly, /usr/sap from /etc/filesystems +epprd_rg:cl_activate_fs(4.517):/usr/sap[fs_mount:119] : should match /usr/sap from CuAt ODM and /usr/sap from the LVCB +epprd_rg:cl_activate_fs(4.517):/usr/sap[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(4.517):/usr/sap[fs_mount:121] : were done in clvaryonvg. 
+epprd_rg:cl_activate_fs(4.517):/usr/sap[fs_mount:123] [[ /usr/sap != /usr/sap ]] +epprd_rg:cl_activate_fs(4.517):/usr/sap[fs_mount:128] [[ /usr/sap != /usr/sap ]] +epprd_rg:cl_activate_fs(4.517):/usr/sap[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(4.517):/usr/sap[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(4.517):/usr/sap[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(4.537):/usr/sap[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(4.537):/usr/sap[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(4.537):/usr/sap[fs_mount:160] amlog_trace '' 'Activating Filesystem|/usr/sap' +epprd_rg:cl_activate_fs(4.537):/usr/sap[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(4.537):/usr/sap[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(4.563):/usr/sap[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(4.566):/usr/sap[amlog_trace:319] DATE=2023-01-28T17:10:44.501258 +epprd_rg:cl_activate_fs(4.566):/usr/sap[amlog_trace:320] echo '|2023-01-28T17:10:44.501258|INFO: Activating Filesystem|/usr/sap' +epprd_rg:cl_activate_fs(4.566):/usr/sap[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(4.566):/usr/sap[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(4.568):/usr/sap[fs_mount:162] : Try to mount filesystem /usr/sap at Jan 28 17:10:44.000 +epprd_rg:cl_activate_fs(4.568):/usr/sap[fs_mount:163] mount /usr/sap +epprd_rg:cl_activate_fs(4.579):/usr/sap[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(4.579):/usr/sap[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, 
+epprd_rg:cl_activate_fs(4.579):/usr/sap[fs_mount:220] : if we are running on an AIX level that supports it +epprd_rg:cl_activate_fs(4.579):/usr/sap[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/usr/sap' +epprd_rg:cl_activate_fs(4.579):/usr/sap[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(4.580):/usr/sap[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(4.606):/usr/sap[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(4.608):/usr/sap[amlog_trace:319] DATE=2023-01-28T17:10:44.543965 +epprd_rg:cl_activate_fs(4.608):/usr/sap[amlog_trace:320] echo '|2023-01-28T17:10:44.543965|INFO: Activating Filesystems completed|/usr/sap' +epprd_rg:cl_activate_fs(4.608):/usr/sap[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(4.608):/usr/sap[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(4.608):/usr/sap[fs_mount:226] : Each of the V, R, M and F fields is padded to fixed length, +epprd_rg:cl_activate_fs(4.608):/usr/sap[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(4.608):/usr/sap[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(4.608):/usr/sap[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(4.608):/usr/sap[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(4.608):/usr/sap[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(4.608):/usr/sap[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(4.608):/usr/sap[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(4.608):/usr/sap[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(4.609):/usr/sap[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(4.612):/usr/sap[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(4.614):/usr/sap[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(4.614):/usr/sap[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(4.614):/usr/sap[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(4.614):/usr/sap[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(4.614):/usr/sap[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(4.615):/usr/sap[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(4.615):/usr/sap[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(4.615):/usr/sap[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(4.492):/usr/sap[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.2 \n\t lvname = saplv \n\t label = /usr/sap \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false \n\t time created = Sat Dec 17 14:46:37 2022\n \t time modified = Sat Dec 17 14:48:05 2022\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(4.615):/usr/sap[fs_mount:249] chfs -a mountguard=yes /usr/sap +epprd_rg:cl_activate_fs(4.615):/usr/sap[fs_mount:249] CLUSTER_OVERRIDE=yes /usr/sap is now guarded against concurrent mounts.
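
The zero-padded VRMF comparison above turns a dotted fileset level into a single integer: V keeps its width, R is padded to two digits, and M and F to three, so bos.rte.filesystem 7.2.5.102 becomes 702005102 and one arithmetic test decides whether this AIX level supports mountguard (7.1.1.0 or later on AIX 7, 6.1.7.0 or later on AIX 6). A condensed sketch of the same trick:

    typeset -li V VRMF=0
    typeset -Z2 R
    typeset -Z3 M F                 # zero-pad: maximum level is 99.99.999.999
    lslpp -lcqOr bos.rte.filesystem | cut -f3 -d: | IFS=. read V R M F
    VRMF=$V$R$M$F                   # 7.2.5.102 -> 702005102
    if (( V == 7 && VRMF >= 701001000 )); then
        # Guard JFS2 against double mounts; changes the VG timestamp, so the
        # chfs is only run when the LVCB does not already say mountguard=yes
        chfs -a mountguard=yes /usr/sap
    fi
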
+epprd_rg:cl_activate_fs(4.763):/usr/sap[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(4.763):/usr/sap[activate_fs_process_group:543] unset PS4_LOOP PS4_TIMER +epprd_rg:cl_activate_fs[activate_fs_process_group:546] : Allow any background mount operations to finish +epprd_rg:cl_activate_fs[activate_fs_process_group:548] wait +epprd_rg:cl_activate_fs[activate_fs_process_group:550] : Read cluster level Preferred read option +epprd_rg:cl_activate_fs[activate_fs_process_group:552] clodmget -n -f lvm_preferred_read HACMPcluster +epprd_rg:cl_activate_fs[activate_fs_process_group:552] cluster_pref_read=roundrobin +epprd_rg:cl_activate_fs[activate_fs_process_group:555] : Looping all file systems to update preferred read option of each lv. +epprd_rg:cl_activate_fs[activate_fs_process_group:556] : By referring VG level preferred_read option or cluster level Preferred read option +epprd_rg:cl_activate_fs[activate_fs_process_group:560] lsfs -c /board_org +epprd_rg:cl_activate_fs[activate_fs_process_group:560] 2>& 1 +epprd_rg:cl_activate_fs[activate_fs_process_group:560] FS_info=$'+epprd_rg:cl_activate_fs[activate_fs_process_group:560] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs[activate_fs_process_group:561] RC=0 +epprd_rg:cl_activate_fs[activate_fs_process_group:562] (( 0 != 0 )) +epprd_rg:cl_activate_fs[activate_fs_process_group:574] print -- $'+epprd_rg:cl_activate_fs[activate_fs_process_group:560] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs[activate_fs_process_group:574] tail -1 +epprd_rg:cl_activate_fs[activate_fs_process_group:574] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs[activate_fs_process_group:574] IFS=: +epprd_rg:cl_activate_fs[activate_fs_process_group:575] LV_name=boardlv +epprd_rg:cl_activate_fs[activate_fs_process_group:577] grep -w 'VOLUME GROUP' +epprd_rg:cl_activate_fs[activate_fs_process_group:577] lslv -L boardlv +epprd_rg:cl_activate_fs[activate_fs_process_group:577] LC_ALL=C +epprd_rg:cl_activate_fs[activate_fs_process_group:577] volume_group='LOGICAL VOLUME: boardlv VOLUME GROUP: datavg' +epprd_rg:cl_activate_fs[activate_fs_process_group:578] volume_group=datavg +epprd_rg:cl_activate_fs[activate_fs_process_group:579] volume_group=datavg +epprd_rg:cl_activate_fs[activate_fs_process_group:581] clodmget -n -f group -q name='VOLUME_GROUP and value=datavg' HACMPresource +epprd_rg:cl_activate_fs[activate_fs_process_group:581] RGName=epprd_rg +epprd_rg:cl_activate_fs[activate_fs_process_group:584] : Get the Preferred storage read option for this VG and perform chlv command +epprd_rg:cl_activate_fs[activate_fs_process_group:586] clodmget -n -f value -q name='LVM_PREFERRED_READ and volume_group=datavg' HACMPvolumegroup +epprd_rg:cl_activate_fs[activate_fs_process_group:586] 2> /dev/null +epprd_rg:cl_activate_fs[activate_fs_process_group:586] PreferredReadOption='' +epprd_rg:cl_activate_fs[activate_fs_process_group:587] [[ -z '' ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:589] PreferredReadOption=roundrobin +epprd_rg:cl_activate_fs[activate_fs_process_group:590] [[ -z roundrobin ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:590] [[ roundrobin == roundrobin ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:593] : Both VG level and Cluster level LVM Preferred Read option chosen as roundrobin.
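
The lookup above resolves the LVM preferred-read policy for boardlv: a VG-level LVM_PREFERRED_READ attribute in HACMPvolumegroup takes precedence, and only when it is empty does the cluster-wide lvm_preferred_read value from HACMPcluster apply; with both resolving to roundrobin, the chlv -R 0 call just below clears any preferred mirror copy. A sketch of that fallback chain (the non-roundrobin branch, which would pick a mirror copy, is not visible in this excerpt):

    vg=datavg lv=boardlv
    pref=$(clodmget -n -f value \
        -q "name=LVM_PREFERRED_READ and volume_group=$vg" HACMPvolumegroup 2>/dev/null)
    [[ -z $pref ]] && pref=$(clodmget -n -f lvm_preferred_read HACMPcluster)
    # roundrobin means no preferred copy: reads rotate across the mirrors
    [[ $pref == roundrobin ]] && chlv -R 0 $lv
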
+epprd_rg:cl_activate_fs[activate_fs_process_group:595] chlv -R 0 boardlv +epprd_rg:cl_activate_fs[activate_fs_process_group:596] (( 0 != 0 )) +epprd_rg:cl_activate_fs[activate_fs_process_group:600] break +epprd_rg:cl_activate_fs[activate_fs_process_group:670] : Update the resource manager with the state of the operation +epprd_rg:cl_activate_fs[activate_fs_process_group:672] ALLNOERROR=All_non_error_filesystems +epprd_rg:cl_activate_fs[activate_fs_process_group:673] cl_RMupdate resource_up All_non_error_filesystems cl_activate_fs 2023-01-28T17:10:44.996320 2023-01-28T17:10:45.000558 +epprd_rg:cl_activate_fs[activate_fs_process_group:676] : And harvest any status from the background mount operations +epprd_rg:cl_activate_fs[activate_fs_process_group:678] [[ -f /tmp/epprd_rg_activate_fs.tmp26739098 ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:688] return 0 +epprd_rg:cl_activate_fs[activate_fs_process_resources:767] RC=0 +epprd_rg:cl_activate_fs[activate_fs_process_resources:768] (( 0 != 0 && 0 == 0 )) +epprd_rg:cl_activate_fs[activate_fs_process_resources:772] RG_FILE_SYSTEMS='' +epprd_rg:cl_activate_fs[activate_fs_process_resources:776] return 0 +epprd_rg:cl_activate_fs[851] STATUS=0 +epprd_rg:cl_activate_fs[873] return 0 +epprd_rg:process_resources(10.654)[process_file_systems:2648] RC=0 +epprd_rg:process_resources(10.654)[process_file_systems:2649] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources(10.655)[process_file_systems:2661] (( 0 != 0 )) +epprd_rg:process_resources(10.655)[process_file_systems:2687] return 0 +epprd_rg:process_resources(10.655)[3483] RC=0 +epprd_rg:process_resources(10.655)[3485] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources(10.655)[3324] true +epprd_rg:process_resources(10.655)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(10.655)[3328] set -a +epprd_rg:process_resources(10.655)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T17:10:45.014017 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(10.674)[3329] eval JOB_TYPE=SYNC_VGS ACTION=ACQUIRE VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' +epprd_rg:process_resources(10.674)[1] JOB_TYPE=SYNC_VGS +epprd_rg:process_resources(10.674)[1] ACTION=ACQUIRE +epprd_rg:process_resources(10.674)[1] VOLUME_GROUPS=datavg +epprd_rg:process_resources(10.674)[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources(10.674)[3330] RC=0 +epprd_rg:process_resources(10.674)[3331] set +a +epprd_rg:process_resources(10.674)[3333] (( 0 != 0 )) +epprd_rg:process_resources(10.674)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(10.674)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(10.674)[3343] export GROUPNAME +epprd_rg:process_resources(10.674)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(10.674)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(10.674)[3360] [[ SYNC_VGS == RELEASE ]] +epprd_rg:process_resources(10.674)[3360] [[ SYNC_VGS == ONLINE ]] +epprd_rg:process_resources(10.674)[3474] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources(10.674)[3476] sync_volume_groups +epprd_rg:process_resources(10.674)[sync_volume_groups:2699] PS4_FUNC=sync_volume_groups +epprd_rg:process_resources(10.674)[sync_volume_groups:2699] typeset PS4_FUNC +epprd_rg:process_resources(10.674)[sync_volume_groups:2700] [[ high == high ]] 
+epprd_rg:process_resources(10.674)[sync_volume_groups:2700] set -x +epprd_rg:process_resources(10.674)[sync_volume_groups:2701] STAT=0 +epprd_rg:process_resources(10.674)[sync_volume_groups:2704] export GROUPNAME +epprd_rg:process_resources(10.675)[sync_volume_groups:2706] get_list_head datavg +epprd_rg:process_resources(10.675)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(10.675)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(10.675)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(10.675)[get_list_head:60] set -x +epprd_rg:process_resources(10.676)[get_list_head:61] echo datavg +epprd_rg:process_resources(10.678)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(10.678)[get_list_head:61] IFS=: +epprd_rg:process_resources(10.679)[get_list_head:62] echo datavg +epprd_rg:process_resources(10.680)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(10.678)[sync_volume_groups:2706] read LIST_OF_VOLUME_GROUPS_FOR_RG +epprd_rg:process_resources(10.685)[sync_volume_groups:2707] get_list_tail datavg +epprd_rg:process_resources(10.685)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(10.686)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(10.686)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(10.686)[get_list_tail:68] set -x +epprd_rg:process_resources(10.686)[get_list_tail:69] echo datavg +epprd_rg:process_resources(10.689)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(10.689)[get_list_tail:69] IFS=: +epprd_rg:process_resources(10.689)[get_list_tail:70] echo +epprd_rg:process_resources(10.688)[sync_volume_groups:2707] read VOLUME_GROUPS +epprd_rg:process_resources(10.690)[sync_volume_groups:2710] : Sync the active volume groups +epprd_rg:process_resources(10.691)[sync_volume_groups:2712] lsvg -L -o +epprd_rg:process_resources(10.691)[sync_volume_groups:2712] 2> /tmp/lsvg.err +epprd_rg:process_resources(10.694)[sync_volume_groups:2712] sort +epprd_rg:process_resources(10.695)[sync_volume_groups:2712] 1> /tmp/lsvg.out.23593416 +epprd_rg:process_resources(10.702)[sync_volume_groups:2713] echo datavg +epprd_rg:process_resources(10.705)[sync_volume_groups:2714] sort +epprd_rg:process_resources(10.707)[sync_volume_groups:2713] tr ' ' '\n' +epprd_rg:process_resources(10.709)[sync_volume_groups:2714] comm -12 /tmp/lsvg.out.23593416 - +epprd_rg:process_resources(10.715)[sync_volume_groups:2718] [[ -s /tmp/lsvg.err ]] +epprd_rg:process_resources(10.715)[sync_volume_groups:2723] rm -f /tmp/lsvg.out.23593416 /tmp/lsvg.err +epprd_rg:process_resources(10.717)[sync_volume_groups:2716] cl_sync_vgs datavg +epprd_rg:cl_sync_vgs[303] version=1.24.1.4 +epprd_rg:cl_sync_vgs[306] (( 1 == 0 )) +epprd_rg:cl_sync_vgs[312] : syncing 4 stale PPs at a time seems to be a win most of the time, but +epprd_rg:cl_sync_vgs[313] : we honor the NUM_PARALLEL_LPS value from /etc/environment, as does +epprd_rg:cl_sync_vgs[314] : syncvg. 
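
sync_volume_groups only syncs volume groups that are both managed by this resource group and currently varied on: lsvg -L -o lists the online VGs, the RG's list is flattened to one name per line, and comm -12 keeps the intersection, each match being handed to cl_sync_vgs in the background. Roughly:

    lsvg -L -o 2>/tmp/lsvg.err | sort > /tmp/lsvg.out.$$
    echo $VOLUME_GROUPS | tr ' ' '\n' | sort |
        comm -12 /tmp/lsvg.out.$$ - |
        while read vg
        do
            cl_sync_vgs $vg &    # background: the VG stays usable during the sync
        done
    [[ -s /tmp/lsvg.err ]] && cat /tmp/lsvg.err >&2
    rm -f /tmp/lsvg.out.$$ /tmp/lsvg.err
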
+epprd_rg:cl_sync_vgs[316] syncflag='' +epprd_rg:cl_sync_vgs[316] export syncflag +epprd_rg:cl_sync_vgs[317] PS4_LOOP='' +epprd_rg:cl_sync_vgs[317] export PS4_LOOP +epprd_rg:cl_sync_vgs[318] typeset -i npl +epprd_rg:cl_sync_vgs[319] grep -q ^NUM_PARALLEL_LPS= /etc/environment +epprd_rg:process_resources(10.734)[sync_volume_groups:2732] unset AM_SYNC_CALLED_BY +epprd_rg:process_resources(10.734)[sync_volume_groups:2734] return 0 +epprd_rg:process_resources(10.734)[3324] true +epprd_rg:process_resources(10.734)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(10.734)[3328] set -a +epprd_rg:cl_sync_vgs[321] syncflag=-P4 +epprd_rg:cl_sync_vgs[328] echo 'NOTE: While the sync is going on, volume group can be used' NOTE: While the sync is going on, volume group can be used +epprd_rg:cl_sync_vgs[331] : For GLVM volume groups, read PARALLEL LPS count from HACMPresource if it is set from GUI, +epprd_rg:cl_sync_vgs[332] : else, read from environment variables, if it is not set use 32 as default value. +epprd_rg:cl_sync_vgs[334] clodmget -q name='GMVG_REP_RESOURCE and value=datavg' -f group HACMPresource +epprd_rg:cl_sync_vgs[334] 2> /dev/null +epprd_rg:process_resources(10.734)[3329] clRGPA +epprd_rg:cl_sync_vgs[334] glvm_rg='' +epprd_rg:cl_sync_vgs[335] [[ -n '' ]] +epprd_rg:cl_sync_vgs[353] check_sync datavg +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:76] typeset vg_name +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:77] typeset vgid +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:78] typeset disklist +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:79] typeset lv_name +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:80] typeset -li stale_count +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:81] typeset -li mode +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:82] RC=0 +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:82] typeset -li RC +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:83] typeset site_node_list +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:84] typeset site_choice +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:86] vg_name=datavg +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:87] disklist='' +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:89] getlvodm -v datavg +epprd_rg:cl_sync_vgs(0.030):datavg[check_sync:89] vgid=00c44af100004b00000001851e9dc053 +epprd_rg:cl_sync_vgs(0.030):datavg[check_sync:92] : find disks in the VG that LVM thinks are inaccessible +epprd_rg:cl_sync_vgs(0.030):datavg[check_sync:94] lsvg -L -p datavg +epprd_rg:cl_sync_vgs(0.030):datavg[check_sync:94] LC_ALL=C +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T17:10:45.103825 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(10.757)[3329] eval JOB_TYPE=EXPORT_FILESYSTEMS ACTION=ACQUIRE EXPORT_FILE_SYSTEMS='"/board_org"' EXPORT_FILE_SYSTEMS_V4='""' RESOURCE_GROUPS='"epprd_rg' '"' STABLE_STORAGE_PATH='""' IP_LABELS='"epprd:epprda:epprds"' DAEMONS='"NFS' 'RPCLOCKD"' +epprd_rg:process_resources(10.757)[1] JOB_TYPE=EXPORT_FILESYSTEMS +epprd_rg:process_resources(10.757)[1] ACTION=ACQUIRE +epprd_rg:process_resources(10.757)[1] EXPORT_FILE_SYSTEMS=/board_org +epprd_rg:process_resources(10.757)[1] EXPORT_FILE_SYSTEMS_V4='' +epprd_rg:process_resources(10.757)[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources(10.757)[1] STABLE_STORAGE_PATH='' +epprd_rg:process_resources(10.757)[1]
IP_LABELS=epprd:epprda:epprds +epprd_rg:process_resources(10.757)[1] DAEMONS='NFS RPCLOCKD' +epprd_rg:process_resources(10.757)[3330] RC=0 +epprd_rg:process_resources(10.757)[3331] set +a +epprd_rg:process_resources(10.757)[3333] (( 0 != 0 )) +epprd_rg:process_resources(10.758)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(10.758)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(10.758)[3343] export GROUPNAME +epprd_rg:process_resources(10.758)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(10.758)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(10.758)[3360] [[ EXPORT_FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources(10.758)[3360] [[ EXPORT_FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources(10.758)[3595] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources(10.758)[3597] export_filesystems +epprd_rg:process_resources(10.758)[export_filesystems:1621] PS4_FUNC=export_filesystems +epprd_rg:process_resources(10.758)[export_filesystems:1621] typeset PS4_FUNC +epprd_rg:process_resources(10.758)[export_filesystems:1622] [[ high == high ]] +epprd_rg:process_resources(10.758)[export_filesystems:1622] set -x +epprd_rg:process_resources(10.758)[export_filesystems:1623] STAT=0 +epprd_rg:process_resources(10.758)[export_filesystems:1624] NFSSTOPPED=0 +epprd_rg:process_resources(10.758)[export_filesystems:1629] [[ NFS == RPCLOCKD ]] +epprd_rg:process_resources(10.758)[export_filesystems:1629] [[ RPCLOCKD == RPCLOCKD ]] +epprd_rg:process_resources(10.758)[export_filesystems:1631] stopsrc -s rpc.lockd 0513-044 The rpc.lockd Subsystem was requested to stop. +epprd_rg:process_resources(10.771)[export_filesystems:1633] touch /tmp/.RPCLOCKDSTOPPED +epprd_rg:process_resources(10.780)[export_filesystems:1638] : For NFSv4, cl_export_fs will use STABLE_STORAGE_PATH, which is set by +epprd_rg:process_resources(10.780)[export_filesystems:1639] : clRGPA and can have colon-separated values for multiple RGs. +epprd_rg:process_resources(10.780)[export_filesystems:1640] : We will save off clRGPA values in stable_storage_path and then extract +epprd_rg:process_resources(10.780)[export_filesystems:1641] : each RG into STABLE_STORAGE_PATH for cl_unexport_fs. 
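
export_filesystems checks the DAEMONS list that clRGPA supplied ('NFS RPCLOCKD' here) and quiesces NFS locking before the exports change: rpc.lockd is stopped and a marker file is left behind, presumably so a later step knows to restart it. In outline:

    for daemon in $DAEMONS
    do
        if [[ $daemon == RPCLOCKD ]]
        then
            stopsrc -s rpc.lockd             # quiesce NFS locking during export changes
            touch /tmp/.RPCLOCKDSTOPPED      # marker: restart rpc.lockd afterwards
        fi
    done
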
+epprd_rg:process_resources(10.780)[export_filesystems:1643] stable_storage_path='' +epprd_rg:process_resources(10.780)[export_filesystems:1643] typeset stable_storage_path +epprd_rg:process_resources(10.780)[export_filesystems:1645] export NFSSTOPPED +epprd_rg:process_resources(10.780)[export_filesystems:1650] export GROUPNAME +epprd_rg:process_resources(10.781)[export_filesystems:1652] get_list_head /board_org +epprd_rg:process_resources(10.781)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(10.781)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(10.781)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(10.781)[get_list_head:60] set -x +epprd_rg:process_resources(10.782)[get_list_head:61] echo /board_org +epprd_rg:process_resources(10.784)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(10.784)[get_list_head:61] IFS=: +epprd_rg:process_resources(10.785)[get_list_head:62] echo /board_org +epprd_rg:process_resources(10.787)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(10.784)[export_filesystems:1652] read LIST_OF_EXPORT_FILE_SYSTEMS_FOR_RG +epprd_rg:process_resources(10.793)[export_filesystems:1653] get_list_tail /board_org +epprd_rg:process_resources(10.793)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(10.793)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(10.793)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(10.793)[get_list_tail:68] set -x +epprd_rg:process_resources(10.798)[get_list_tail:69] echo /board_org +epprd_rg:process_resources(10.798)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(10.798)[get_list_tail:69] IFS=: +epprd_rg:process_resources(10.800)[get_list_tail:70] echo +epprd_rg:process_resources(10.797)[export_filesystems:1653] read EXPORT_FILE_SYSTEMS +epprd_rg:process_resources(10.802)[export_filesystems:1654] get_list_head +epprd_rg:process_resources(10.803)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(10.803)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(10.803)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(10.803)[get_list_head:60] set -x +epprd_rg:process_resources(10.804)[get_list_head:61] echo +epprd_rg:process_resources(10.805)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(10.805)[get_list_head:61] IFS=: +epprd_rg:process_resources(10.806)[get_list_head:62] echo +epprd_rg:process_resources(10.808)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(10.805)[export_filesystems:1654] read LIST_OF_EXPORT_FILE_SYSTEMS_V4_FOR_RG +epprd_rg:process_resources(10.813)[export_filesystems:1655] get_list_tail +epprd_rg:process_resources(10.813)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(10.814)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(10.814)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(10.814)[get_list_tail:68] set -x +epprd_rg:process_resources(10.816)[get_list_tail:69] echo +epprd_rg:process_resources(10.815)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(10.815)[get_list_tail:69] IFS=: +epprd_rg:process_resources(10.817)[get_list_tail:70] echo +epprd_rg:process_resources(10.815)[export_filesystems:1655] read EXPORT_FILE_SYSTEMS_V4 +epprd_rg:process_resources(10.820)[export_filesystems:1656] get_list_head +epprd_rg:process_resources(10.820)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(10.820)[get_list_head:59] typeset 
PS4_FUNC +epprd_rg:process_resources(10.820)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(10.820)[get_list_head:60] set -x +epprd_rg:process_resources(10.821)[get_list_head:61] echo +epprd_rg:process_resources(10.824)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(10.824)[get_list_head:61] IFS=: +epprd_rg:process_resources(10.825)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(10.827)[get_list_head:62] echo +epprd_rg:process_resources(10.823)[export_filesystems:1656] read STABLE_STORAGE_PATH +epprd_rg:process_resources(10.829)[export_filesystems:1657] get_list_tail +epprd_rg:process_resources(10.829)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(10.829)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(10.829)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(10.829)[get_list_tail:68] set -x +epprd_rg:process_resources(10.830)[get_list_tail:69] echo +epprd_rg:process_resources(10.834)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(10.834)[get_list_tail:69] IFS=: +epprd_rg:process_resources(10.834)[get_list_tail:70] echo +epprd_rg:process_resources(10.833)[export_filesystems:1657] read stable_storage_path +epprd_rg:process_resources(10.834)[export_filesystems:1659] cl_export_fs epprd:epprda:epprds /board_org '' +epprd_rg:cl_export_fs[102] version=%I% +epprd_rg:cl_export_fs[105] . /usr/es/sbin/cluster/events/utils/cl_nfs_utils +epprd_rg:cl_export_fs[98] PROGNAME=cl_export_fs +epprd_rg:cl_export_fs[99] [[ high == high ]] +epprd_rg:cl_export_fs[101] set -x +epprd_rg:cl_export_fs[102] version=%I +epprd_rg:cl_export_fs[105] cl_exports_data='' +epprd_rg:cl_export_fs[105] typeset cl_exports_data +epprd_rg:cl_export_fs[106] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[107] HOST=epprd:epprda:epprds +epprd_rg:cl_export_fs[108] EXPORT_V3=/board_org +epprd_rg:cl_export_fs[109] EXPORT_V4='' +epprd_rg:cl_export_fs[111] STATUS=0 +epprd_rg:cl_export_fs[113] LIMIT=60 +epprd_rg:cl_export_fs[113] WAIT=1 +epprd_rg:cl_export_fs[113] TRY=0 +epprd_rg:cl_export_fs[113] typeset -li LIMIT WAIT TRY +epprd_rg:cl_export_fs[115] PROC_RES=false +epprd_rg:cl_export_fs[118] : If JOB_TYPE is set, and it does not equal to GROUP, then +epprd_rg:cl_export_fs[119] : we are processing for process_resources +epprd_rg:cl_export_fs[121] [[ EXPORT_FILESYSTEMS != 0 ]] +epprd_rg:cl_export_fs[121] [[ EXPORT_FILESYSTEMS != GROUP ]] +epprd_rg:cl_export_fs[122] PROC_RES=true +epprd_rg:cl_export_fs[125] set -u +epprd_rg:cl_export_fs[127] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[129] (( 3 < 2 || 3 > 3 )) +epprd_rg:cl_export_fs[142] DARE_EVENT=reconfig_resource_acquire +epprd_rg:cl_export_fs[145] : Check memory to see if NFSv4 exports have been configured. +epprd_rg:cl_export_fs[147] export_v4='' +epprd_rg:cl_export_fs[148] [[ -z '' ]] +epprd_rg:cl_export_fs[148] [[ rg_move == reconfig_resource_acquire ]] +epprd_rg:cl_export_fs[158] : If we do not have NFSv4 exports configured, then determine +epprd_rg:cl_export_fs[159] : the protocol versions from the HACMP exports file. 
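
The repeated get_list_head/get_list_tail calls above are how process_resources walks clRGPA's per-resource-group lists: values for different RGs are separated by colons, entries within one RG by commas. Reconstructed from the traced logic, the pair amounts to:

    get_list_head() {
        echo "$*" | IFS=: read listhead listtail
        echo $listhead | tr , ' '       # first RG's entries, commas become spaces
    }
    get_list_tail() {
        echo "$*" | IFS=: read listhead listtail
        echo $listtail                  # remainder, for the next RG in the loop
    }
    # e.g. peel this RG's export list off the front of the job data:
    get_list_head "$EXPORT_FILE_SYSTEMS" | read LIST_OF_EXPORT_FILE_SYSTEMS_FOR_RG
    get_list_tail "$EXPORT_FILE_SYSTEMS" | read EXPORT_FILE_SYSTEMS
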
+epprd_rg:cl_export_fs[161] [[ -z '' ]] +epprd_rg:cl_export_fs[161] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[227] /usr/sbin/bootinfo -K +epprd_rg:cl_export_fs[227] KERNEL_BITS=64 +epprd_rg:cl_export_fs[229] subsystems='nfsd rpc.mountd' +epprd_rg:cl_export_fs[230] [[ -n '' ]] +epprd_rg:cl_export_fs[233] : Special processing for cross mounts of EFS keys +epprd_rg:cl_export_fs[234] : The overmount of /var/efs must be removed prior +epprd_rg:cl_export_fs[235] : to stopping or restarting NFS, since the SRC +epprd_rg:cl_export_fs[236] : operations will attempt to check the EFS enablement. +epprd_rg:cl_export_fs[238] mount +epprd_rg:cl_export_fs[238] grep -w /var/efs +epprd_rg:cl_export_fs[238] mounted_info='' +epprd_rg:cl_export_fs[239] [[ -n '' ]] +epprd_rg:cl_export_fs[295] : Kill and restart everything in '"nfsd' 'rpc.mountd"' +epprd_rg:cl_export_fs[299] : Kill nfsd, and restart it below +epprd_rg:cl_export_fs[306] [[ nfsd == nfsd ]] +epprd_rg:cl_export_fs[307] [[ 64 == 64 ]] +epprd_rg:cl_export_fs[307] [[ -x /usr/sbin/nfs4smctl ]] +epprd_rg:cl_export_fs[308] [[ ! -s /etc/xtab ]] +epprd_rg:cl_export_fs[311] clcheck_server nfsd +epprd_rg:clcheck_server[118] [[ high == high ]] +epprd_rg:clcheck_server[118] version=1.10.4.2 +epprd_rg:clcheck_server[119] cl_get_path +epprd_rg:clcheck_server[119] HA_DIR=es +epprd_rg:clcheck_server[121] SERVER=nfsd +epprd_rg:clcheck_server[122] STATUS=0 +epprd_rg:clcheck_server[123] FATAL_ERROR=255 +epprd_rg:clcheck_server[124] retries=0 +epprd_rg:clcheck_server[124] typeset -li retries +epprd_rg:clcheck_server[126] [[ -n nfsd ]] +epprd_rg:clcheck_server[131] lssrc -s nfsd +epprd_rg:clcheck_server[131] LC_ALL=C +epprd_rg:clcheck_server[131] grep 'not on file' +epprd_rg:clcheck_server[131] wc -l +epprd_rg:clcheck_server[131] rc=' 0' +epprd_rg:clcheck_server[133] (( 0 == 1 )) +epprd_rg:clcheck_server[143] [[ 0 =~ 3 ]] +epprd_rg:clcheck_server[147] lssrc -s nfsd +epprd_rg:clcheck_server[147] 1> /dev/null 2> /dev/null +epprd_rg:cl_sync_vgs(0.157):datavg[check_sync:94] disklist=$'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.158):datavg[check_sync:95] print -- $'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.160):datavg[check_sync:95] grep -w missing +epprd_rg:cl_sync_vgs(0.162):datavg[check_sync:95] cut -f1 '-d ' +epprd_rg:cl_sync_vgs(0.165):datavg[check_sync:95] missing_disklist='' +epprd_rg:cl_sync_vgs(0.166):datavg[check_sync:96] print -- $'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:clcheck_server[161] lssrc -s nfsd +epprd_rg:clcheck_server[161] LC_ALL=C 
+epprd_rg:cl_sync_vgs(0.171):datavg[check_sync:96] grep -w removed +epprd_rg:clcheck_server[161] egrep 'stop|active' +epprd_rg:cl_sync_vgs(0.174):datavg[check_sync:96] cut -f1 '-d ' +epprd_rg:cl_sync_vgs(0.178):datavg[check_sync:96] removed_disklist='' +epprd_rg:cl_sync_vgs(0.178):datavg[check_sync:100] : Proceed if there are some disks that LVM thinks are inaccessible +epprd_rg:cl_sync_vgs(0.178):datavg[check_sync:102] [[ -n '' ]] +epprd_rg:cl_sync_vgs(0.178):datavg[check_sync:102] [[ -n '' ]] +epprd_rg:cl_sync_vgs(0.178):datavg[check_sync:196] : sync if any LVs in the VG have stale partitions +epprd_rg:cl_sync_vgs(0.178):datavg[check_sync:198] (( 0 == 0 )) +epprd_rg:cl_sync_vgs(0.178):datavg[check_sync:201] : A status of 2,3,5 or 7 indicates the presence of dirty and/or stale partitions +epprd_rg:cl_sync_vgs(0.178):datavg[check_sync:213] is_start_logged=0 +epprd_rg:cl_sync_vgs(0.178):datavg[check_sync:218] at_least_one_sync_success=0 +epprd_rg:cl_sync_vgs(0.179):datavg[check_sync:219] lqueryvg -g 00c44af100004b00000001851e9dc053 -L +epprd_rg:clcheck_server[161] check_if_down='' +epprd_rg:clcheck_server[166] [[ -z '' ]] +epprd_rg:clcheck_server[171] sleep 1 +epprd_rg:cl_sync_vgs(0.181):datavg[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.197):datavg[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.197):datavg.epprdaloglv[check_sync:221] PS4_LOOP=datavg.epprdaloglv +epprd_rg:cl_sync_vgs(0.197):datavg.epprdaloglv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.197):datavg.epprdaloglv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.197):datavg.epprdaloglv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.197):datavg.epprdaloglv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.197):datavg.epprdaloglv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.197):datavg.saplv[check_sync:221] PS4_LOOP=datavg.saplv +epprd_rg:cl_sync_vgs(0.197):datavg.saplv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.198):datavg.saplv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.198):datavg.saplv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.198):datavg.saplv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.198):datavg.saplv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.198):datavg.sapmntlv[check_sync:221] PS4_LOOP=datavg.sapmntlv +epprd_rg:cl_sync_vgs(0.198):datavg.sapmntlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.198):datavg.sapmntlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.198):datavg.sapmntlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.198):datavg.sapmntlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.198):datavg.sapmntlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.198):datavg.oraclelv[check_sync:221] PS4_LOOP=datavg.oraclelv +epprd_rg:cl_sync_vgs(0.198):datavg.oraclelv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.198):datavg.oraclelv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.198):datavg.oraclelv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.198):datavg.oraclelv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.198):datavg.oraclelv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.198):datavg.epplv[check_sync:221]
PS4_LOOP=datavg.epplv +epprd_rg:cl_sync_vgs(0.198):datavg.epplv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.198):datavg.epplv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.198):datavg.epplv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.198):datavg.epplv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.198):datavg.epplv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.198):datavg.oraarchlv[check_sync:221] PS4_LOOP=datavg.oraarchlv +epprd_rg:cl_sync_vgs(0.198):datavg.oraarchlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.198):datavg.oraarchlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.198):datavg.oraarchlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.198):datavg.oraarchlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.198):datavg.oraarchlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata1lv[check_sync:221] PS4_LOOP=datavg.sapdata1lv +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata1lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata1lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata1lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata1lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata1lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata2lv[check_sync:221] PS4_LOOP=datavg.sapdata2lv +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata2lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata2lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata2lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata2lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata2lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata3lv[check_sync:221] PS4_LOOP=datavg.sapdata3lv +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata3lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata3lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata3lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata3lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata3lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata4lv[check_sync:221] PS4_LOOP=datavg.sapdata4lv +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata4lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata4lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata4lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata4lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.198):datavg.sapdata4lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.198):datavg.boardlv[check_sync:221] PS4_LOOP=datavg.boardlv +epprd_rg:cl_sync_vgs(0.198):datavg.boardlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.198):datavg.boardlv[check_sync:225] : Anything else indicates no stale partitions 
+epprd_rg:cl_sync_vgs(0.198):datavg.boardlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.198):datavg.boardlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.198):datavg.boardlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.198):datavg.origlogAlv[check_sync:221] PS4_LOOP=datavg.origlogAlv +epprd_rg:cl_sync_vgs(0.198):datavg.origlogAlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.198):datavg.origlogAlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.198):datavg.origlogAlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.198):datavg.origlogAlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.198):datavg.origlogAlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.198):datavg.origlogBlv[check_sync:221] PS4_LOOP=datavg.origlogBlv +epprd_rg:cl_sync_vgs(0.198):datavg.origlogBlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.198):datavg.origlogBlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.198):datavg.origlogBlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.198):datavg.origlogBlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.198):datavg.origlogBlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.198):datavg.mirrlogAlv[check_sync:221] PS4_LOOP=datavg.mirrlogAlv +epprd_rg:cl_sync_vgs(0.198):datavg.mirrlogAlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.198):datavg.mirrlogAlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.198):datavg.mirrlogAlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.198):datavg.mirrlogAlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.198):datavg.mirrlogAlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.198):datavg.mirrlogBlv[check_sync:221] PS4_LOOP=datavg.mirrlogBlv +epprd_rg:cl_sync_vgs(0.198):datavg.mirrlogBlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.198):datavg.mirrlogBlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.198):datavg.mirrlogBlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.198):datavg.mirrlogBlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.198):datavg.mirrlogBlv[check_sync:268] [[ -n RG_MOVE ]] +epprd_rg:cl_sync_vgs(0.198):datavg.mirrlogBlv[check_sync:268] (( 0 == 1 )) +epprd_rg:cl_sync_vgs[355] exit 0 +epprd_rg:clcheck_server[172] lssrc -s nfsd +epprd_rg:clcheck_server[172] LC_ALL=C +epprd_rg:clcheck_server[172] egrep 'stop|active' +epprd_rg:clcheck_server[172] check_if_down='' +epprd_rg:clcheck_server[173] [[ -z '' ]] +epprd_rg:clcheck_server[177] return 0 +epprd_rg:cl_export_fs[313] startsrc -s nfsd 0513-059 The nfsd Subsystem has been started. Subsystem PID is 28377402. +epprd_rg:cl_export_fs[314] rc=0 +epprd_rg:cl_export_fs[315] (( 0 == 0 )) +epprd_rg:cl_export_fs[317] sleep 3 +epprd_rg:cl_export_fs[318] lssrc -s nfsd +epprd_rg:cl_export_fs[318] LC_ALL=C +epprd_rg:cl_export_fs[318] tail +2 +epprd_rg:cl_export_fs[318] subsys_state=' nfsd nfs 28377402 active' +epprd_rg:cl_export_fs[321] (( 0 != 0 )) +epprd_rg:cl_export_fs[321] print -- ' nfsd nfs 28377402 active' +epprd_rg:cl_export_fs[321] grep -qw active +epprd_rg:cl_export_fs[329] : nfsv4 daemon not stopped due to existing mounts +epprd_rg:cl_export_fs[330] : Turn on NFSv4 grace periods and ignore any errors. 
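
check_sync's LV loop above is driven by lqueryvg -g <vgid> -L, whose third field is a status code: 2, 3, 5 or 7 flags dirty and/or stale partitions and would trigger a sync, while every LV here reports 1 and falls through the continue branch, so cl_sync_vgs exits without syncing anything. A sketch of the loop, including the parallelism cl_sync_vgs derived earlier (4 by default, overridden by NUM_PARALLEL_LPS in /etc/environment; syncvg -l targets a single LV):

    vgid=$(getlvodm -v datavg)
    typeset -i npl=4
    grep -q '^NUM_PARALLEL_LPS=' /etc/environment &&
        npl=$(sed -n 's/^NUM_PARALLEL_LPS=//p' /etc/environment)
    lqueryvg -g $vgid -L | while read lv_id lv_name lv_status
    do
        case $lv_status in
        2|3|5|7) syncvg -P $npl -l $lv_name ;;   # dirty and/or stale partitions
        *)       continue ;;                     # anything else: nothing to sync
        esac
    done
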
+epprd_rg:cl_export_fs[332] chnfs -I -g on -x 1 +epprd_rg:cl_export_fs[332] ODMDIR=/etc/objrepos 0513-077 Subsystem has been changed. 0513-077 Subsystem has been changed. +epprd_rg:cl_export_fs[299] : Kill rpc.mountd, and restart it below +epprd_rg:cl_export_fs[306] [[ rpc.mountd == nfsd ]] +epprd_rg:cl_export_fs[336] : Friendly stop of rpc.mountd +epprd_rg:cl_export_fs[338] lssrc -s rpc.mountd +epprd_rg:cl_export_fs[338] LC_ALL=C +epprd_rg:cl_export_fs[338] tail +2 +epprd_rg:cl_export_fs[338] grep -qw active +epprd_rg:cl_export_fs[341] : Now, wait for rpc.mountd to die +epprd_rg:cl_export_fs[343] (( TRY=0)) +epprd_rg:cl_export_fs[343] (( 0 < 60)) +epprd_rg:cl_export_fs[345] lssrc -s rpc.mountd +epprd_rg:cl_export_fs[345] LC_ALL=C +epprd_rg:cl_export_fs[345] tail +2 +epprd_rg:cl_export_fs[345] subsys_state=' rpc.mountd nfs inoperative' +epprd_rg:cl_export_fs[346] print -- ' rpc.mountd nfs inoperative' +epprd_rg:cl_export_fs[346] grep -qw inoperative +epprd_rg:cl_export_fs[348] [[ high == high ]] +epprd_rg:cl_export_fs[348] set -x +epprd_rg:cl_export_fs[349] subsys_state=inoperative +epprd_rg:cl_export_fs[350] break +epprd_rg:cl_export_fs[356] [[ high == high ]] +epprd_rg:cl_export_fs[356] set -x +epprd_rg:cl_export_fs[358] [[ inoperative != inoperative ]] +epprd_rg:cl_export_fs[382] : If stopsrc has failed to stop rpc.mountd, +epprd_rg:cl_export_fs[383] : use a real kill on the daemon +epprd_rg:cl_export_fs[385] ps -eo comm,pid +epprd_rg:cl_export_fs[385] grep -w rpc.mountd +epprd_rg:cl_export_fs[385] grep -vw grep +epprd_rg:cl_export_fs[385] read skip subsys_pid rest +epprd_rg:cl_export_fs[386] [[ '' == +([0-9]) ]] +epprd_rg:cl_export_fs[389] : If rpc.mountd has been stopped, +epprd_rg:cl_export_fs[390] : start it back up again. +epprd_rg:cl_export_fs[392] clcheck_server rpc.mountd +epprd_rg:clcheck_server[118] [[ high == high ]] +epprd_rg:clcheck_server[118] version=1.10.4.2 +epprd_rg:clcheck_server[119] cl_get_path +epprd_rg:clcheck_server[119] HA_DIR=es +epprd_rg:clcheck_server[121] SERVER=rpc.mountd +epprd_rg:clcheck_server[122] STATUS=0 +epprd_rg:clcheck_server[123] FATAL_ERROR=255 +epprd_rg:clcheck_server[124] retries=0 +epprd_rg:clcheck_server[124] typeset -li retries +epprd_rg:clcheck_server[126] [[ -n rpc.mountd ]] +epprd_rg:clcheck_server[131] lssrc -s rpc.mountd +epprd_rg:clcheck_server[131] LC_ALL=C +epprd_rg:clcheck_server[131] grep 'not on file' +epprd_rg:clcheck_server[131] wc -l +epprd_rg:clcheck_server[131] rc=' 0' +epprd_rg:clcheck_server[133] (( 0 == 1 )) +epprd_rg:clcheck_server[143] [[ 0 =~ 3 ]] +epprd_rg:clcheck_server[147] lssrc -s rpc.mountd +epprd_rg:clcheck_server[147] 1> /dev/null 2> /dev/null +epprd_rg:clcheck_server[161] lssrc -s rpc.mountd +epprd_rg:clcheck_server[161] LC_ALL=C +epprd_rg:clcheck_server[161] egrep 'stop|active' +epprd_rg:clcheck_server[161] check_if_down='' +epprd_rg:clcheck_server[166] [[ -z '' ]] +epprd_rg:clcheck_server[171] sleep 1 +epprd_rg:clcheck_server[172] lssrc -s rpc.mountd +epprd_rg:clcheck_server[172] LC_ALL=C +epprd_rg:clcheck_server[172] egrep 'stop|active' +epprd_rg:clcheck_server[172] check_if_down='' +epprd_rg:clcheck_server[173] [[ -z '' ]] +epprd_rg:clcheck_server[177] return 0 +epprd_rg:cl_export_fs[394] [[ rpc.mountd == nfsd ]] +epprd_rg:cl_export_fs[403] : Start rpc.mountd back up again +epprd_rg:cl_export_fs[405] startsrc -s rpc.mountd 0513-059 The rpc.mountd Subsystem has been started. Subsystem PID is 8257896. 
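
The rpc.mountd handling above follows a stop, poll, kill, restart pattern: a friendly SRC stop while the subsystem is active, a poll of up to LIMIT=60 one-second waits for lssrc to report it inoperative, a direct kill of the PID if SRC never managed it, and a startsrc once clcheck_server agrees the daemon is down. Condensed (clcheck_server's retry bookkeeping is simplified away, and the stopsrc itself is not visible in this excerpt):

    lssrc -s rpc.mountd | tail +2 | grep -qw active && stopsrc -s rpc.mountd
    typeset -li LIMIT=60 TRY=0
    while (( TRY++ < LIMIT ))
    do
        lssrc -s rpc.mountd | tail +2 | grep -qw inoperative && break
        sleep 1
    done
    # If SRC failed to stop it, use a real kill on the daemon
    subsys_pid=''
    ps -eo comm,pid | grep -w rpc.mountd | grep -vw grep | read skip subsys_pid rest
    [[ $subsys_pid == +([0-9]) ]] && kill $subsys_pid
    startsrc -s rpc.mountd
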
+epprd_rg:cl_export_fs[406] rc=0 +epprd_rg:cl_export_fs[407] (( 0 == 0 )) +epprd_rg:cl_export_fs[409] sleep 3 +epprd_rg:cl_export_fs[410] tail +2 +epprd_rg:cl_export_fs[410] lssrc -s rpc.mountd +epprd_rg:cl_export_fs[410] LC_ALL=C +epprd_rg:cl_export_fs[410] subsys_state=' rpc.mountd nfs 8257896 active' +epprd_rg:cl_export_fs[413] (( 0 != 0 )) +epprd_rg:cl_export_fs[413] print -- ' rpc.mountd nfs 8257896 active' +epprd_rg:cl_export_fs[413] grep -qw active +epprd_rg:cl_export_fs[431] : Set the NFSv4 nfsroot parameter. This must be set prior to any +epprd_rg:cl_export_fs[432] : NFS exports that use the exname option, and cannot be set to a new +epprd_rg:cl_export_fs[433] : value if any exname exports already exist. This is normally done +epprd_rg:cl_export_fs[434] : at IPL, but rc.nfs is not run at boot when HACMP is installed. +epprd_rg:cl_export_fs[436] [[ -n '' ]] +epprd_rg:cl_export_fs[438] hasrv='' +epprd_rg:cl_export_fs[440] [[ -z '' ]] +epprd_rg:cl_export_fs[442] query=name='STABLE_STORAGE_PATH AND group=epprd_rg' +epprd_rg:cl_export_fs[443] odmget -q name='STABLE_STORAGE_PATH AND group=epprd_rg' HACMPresource +epprd_rg:cl_export_fs[444] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_export_fs[443] STABLE_STORAGE_PATH='' +epprd_rg:cl_export_fs[447] [[ -z '' ]] +epprd_rg:cl_export_fs[449] STABLE_STORAGE_PATH=/var/adm/nfsv4.hacmp/epprd_rg +epprd_rg:cl_export_fs[452] [[ -z '' ]] +epprd_rg:cl_export_fs[454] query=name='STABLE_STORAGE_COOKIE AND group=epprd_rg' +epprd_rg:cl_export_fs[455] odmget -q name='STABLE_STORAGE_COOKIE AND group=epprd_rg' HACMPresource +epprd_rg:cl_export_fs[456] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_export_fs[455] STABLE_STORAGE_COOKIE='' +epprd_rg:cl_export_fs[459] [[ -n epprd_rg ]] +epprd_rg:cl_export_fs[461] odmget -q 'name = SERVICE_LABEL and group = epprd_rg' HACMPresource +epprd_rg:cl_export_fs[462] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:cl_export_fs[461] SERVICE_LABEL=epprd +epprd_rg:cl_export_fs[465] primary epprd +epprd_rg:cl_export_fs[primary:55] echo epprd +epprd_rg:cl_export_fs[465] primary=epprd +epprd_rg:cl_export_fs[466] secondary epprd +epprd_rg:cl_export_fs[secondary:74] [[ -n epprd ]] +epprd_rg:cl_export_fs[secondary:74] shift +epprd_rg:cl_export_fs[secondary:75] echo '' +epprd_rg:cl_export_fs[466] secondary='' +epprd_rg:cl_export_fs[468] nfs_node_state='' +epprd_rg:cl_export_fs[471] : Determine if grace periods are enabled +epprd_rg:cl_export_fs[473] ps -eo args +epprd_rg:cl_export_fs[473] grep -w nfsd +epprd_rg:cl_export_fs[473] grep -qw -- '-gp on' +epprd_rg:cl_export_fs[476] gp=off +epprd_rg:cl_export_fs[480] : We can use an NFSv4 node if grace periods are enabled, we are running a +epprd_rg:cl_export_fs[481] : 64-bit kernel, and the nfs4smctl command exists. +epprd_rg:cl_export_fs[483] [[ off == on ]] +epprd_rg:cl_export_fs[487] rm -f '/var/adm/nfsv4.hacmp/epprd_rg/*' +epprd_rg:cl_export_fs[487] 2> /dev/null +epprd_rg:cl_export_fs[491] : If we have NFSv4 exports, then we need to configure our NFS node so that +epprd_rg:cl_export_fs[492] : we can use stable storage. Note, NFS only supports this functionality in +epprd_rg:cl_export_fs[493] : its 64-bit kernels. 
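
STABLE_STORAGE_PATH and its companion attributes come straight out of the HACMPresource ODM class: odmget dumps the stanza, sed lifts the quoted value, and an empty result falls back to the default /var/adm/nfsv4.hacmp/<rg> location. The traced pattern:

    rg=epprd_rg
    STABLE_STORAGE_PATH=$(odmget -q "name = STABLE_STORAGE_PATH AND group = $rg" \
        HACMPresource | sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p')
    # Default stable-storage location when none is configured for the RG
    [[ -z $STABLE_STORAGE_PATH ]] && STABLE_STORAGE_PATH=/var/adm/nfsv4.hacmp/$rg
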
+epprd_rg:cl_export_fs[495] [[ -n '' ]] +epprd_rg:cl_export_fs[580] [[ '' == acquiring ]] +epprd_rg:cl_export_fs[585] ALLEXPORTS=All_exports +epprd_rg:cl_export_fs[587] : update resource manager with this action +epprd_rg:cl_export_fs[589] cl_RMupdate resource_acquiring All_exports cl_export_fs 2023-01-28T17:10:54.469073 2023-01-28T17:10:54.473352 +epprd_rg:cl_export_fs[592] : Build a list of all filesystems that need to be exported, irrespective of +epprd_rg:cl_export_fs[593] : the protocol version. Since some filesystems may be exported with multiple +epprd_rg:cl_export_fs[594] : versions, remove any duplicates. +epprd_rg:cl_export_fs[596] echo /board_org +epprd_rg:cl_export_fs[596] tr ' ' '\n' +epprd_rg:cl_export_fs[596] sort -u +epprd_rg:cl_export_fs[596] FILESYSTEM_LIST=/board_org +epprd_rg:cl_export_fs[599] : Loop through all of the filesystems we need to export ... +epprd_rg:cl_export_fs[603] v3='' +epprd_rg:cl_export_fs[604] v4='' +epprd_rg:cl_export_fs[605] root=epprd:epprda:epprds +epprd_rg:cl_export_fs[606] new_options='' +epprd_rg:cl_export_fs[607] export_file_line='' +epprd_rg:cl_export_fs[608] USING_EXPORTS_FILE=0 +epprd_rg:cl_export_fs[609] export_lines='' +epprd_rg:cl_export_fs[610] otheroption='' +epprd_rg:cl_export_fs[613] : Get the export line from exportfs for this export +epprd_rg:cl_export_fs[615] exportfs +epprd_rg:cl_export_fs[615] grep '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_export_fs[615] export_line='' +epprd_rg:cl_export_fs[617] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[636] : If the filesystem currently is not exported, then get the options from +epprd_rg:cl_export_fs[637] : the exports file. We will merge these options with options specified +epprd_rg:cl_export_fs[638] : through resource group attributes to produce the actual options we will +epprd_rg:cl_export_fs[639] : provide to exportfs. +epprd_rg:cl_export_fs[641] [[ -z '' ]] +epprd_rg:cl_export_fs[643] export_line='' +epprd_rg:cl_export_fs[644] USING_EXPORTS_FILE=1 +epprd_rg:cl_export_fs[648] : In case of multiple exports for same filesystem +epprd_rg:cl_export_fs[649] : Process them line by line +epprd_rg:cl_export_fs[651] set +u +epprd_rg:cl_export_fs[652] oldifs=$' \t\n' +epprd_rg:cl_export_fs[653] IFS=$'\n' +epprd_rg:cl_export_fs[653] export_lines=( ) +epprd_rg:cl_export_fs[654] IFS=$' \t\n' +epprd_rg:cl_export_fs[656] [ -n '' ] +epprd_rg:cl_export_fs[733] set -u +epprd_rg:cl_export_fs[736] : At this point, v3 and v4 are set based on what is actually exported +epprd_rg:cl_export_fs[737] : or what is configured to be exported in the exports file. +epprd_rg:cl_export_fs[740] (( USING_EXPORTS_FILE )) +epprd_rg:cl_export_fs[742] v3='' +epprd_rg:cl_export_fs[743] v4='' +epprd_rg:cl_export_fs[747] : At this point, v3 and v4 are set based on what is actually exported. +epprd_rg:cl_export_fs[748] : Now add additional versions if the resource group has them configured. +epprd_rg:cl_export_fs[752] [[ /board_org == /board_org ]] +epprd_rg:cl_export_fs[752] v3=:2:3 +epprd_rg:cl_export_fs[752] break +epprd_rg:cl_export_fs[761] : Versions 2 and 3 are the default versions. 
Some versions of AIX do +epprd_rg:cl_export_fs[762] : not support the vers export option, so only use the option if we are +epprd_rg:cl_export_fs[763] : exporting a non-default value such as 4 +epprd_rg:cl_export_fs[765] [[ -n '' ]] +epprd_rg:cl_export_fs[779] [[ -n epprd:epprda:epprds ]] +epprd_rg:cl_export_fs[782] : If we have root privileged clients, +epprd_rg:cl_export_fs[783] : then add them to the option list. +epprd_rg:cl_export_fs[785] new_options=,root=epprd:epprda:epprds +epprd_rg:cl_export_fs[788] [[ -n '' ]] +epprd_rg:cl_export_fs[798] : Strip off the leading comma +epprd_rg:cl_export_fs[800] echo ,root=epprd:epprda:epprds +epprd_rg:cl_export_fs[800] cut -d, -f2- +epprd_rg:cl_export_fs[800] new_options=root=epprd:epprda:epprds +epprd_rg:cl_export_fs[802] [[ -z root=epprd:epprda:epprds ]] +epprd_rg:cl_export_fs[811] : Exporting filesystem /board_org with options root=epprd:epprda:epprds +epprd_rg:cl_export_fs[813] exportfs -i -o root=epprd:epprda:epprds /board_org +epprd_rg:cl_export_fs[814] RC=0 +epprd_rg:cl_export_fs[817] (( 0 != 0 )) +epprd_rg:cl_export_fs[834] ALLNOERREXPORT=All_nonerror_exports +epprd_rg:cl_export_fs[836] : update resource manager with results +epprd_rg:cl_export_fs[838] cl_RMupdate resource_up All_nonerror_exports cl_export_fs 2023-01-28T17:10:54.524497 2023-01-28T17:10:54.528792 +epprd_rg:cl_export_fs[840] exit 0 +epprd_rg:process_resources(20.182)[export_filesystems:1662] RC=0 +epprd_rg:process_resources(20.182)[export_filesystems:1663] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources(20.182)[export_filesystems:1669] (( 0 != 0 )) +epprd_rg:process_resources(20.182)[export_filesystems:1675] return 0 +epprd_rg:process_resources(20.182)[3324] true +epprd_rg:process_resources(20.183)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(20.183)[3328] set -a +epprd_rg:process_resources(20.183)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T17:10:54.542350 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(20.196)[3329] eval JOB_TYPE=TELINIT +epprd_rg:process_resources(20.196)[1] JOB_TYPE=TELINIT +epprd_rg:process_resources(20.196)[3330] RC=0 +epprd_rg:process_resources(20.196)[3331] set +a +epprd_rg:process_resources(20.196)[3333] (( 0 != 0 )) +epprd_rg:process_resources(20.196)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(20.196)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(20.196)[3343] export GROUPNAME +epprd_rg:process_resources(20.196)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(20.196)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(20.196)[3360] [[ TELINIT == RELEASE ]] +epprd_rg:process_resources(20.196)[3360] [[ TELINIT == ONLINE ]] +epprd_rg:process_resources(20.196)[3435] cl_telinit +epprd_rg:cl_telinit[178] version=%I% +epprd_rg:cl_telinit[182] TELINIT_FILE=/usr/es/sbin/cluster/.telinit +epprd_rg:cl_telinit[183] USE_TELINIT_FILE=/usr/es/sbin/cluster/.use_telinit +epprd_rg:cl_telinit[185] [[ -f /usr/es/sbin/cluster/.use_telinit ]] +epprd_rg:cl_telinit[189] USE_TELINIT=0 +epprd_rg:cl_telinit[198] [[ '' == -boot ]] +epprd_rg:cl_telinit[236] cl_lsitab clinit +epprd_rg:cl_telinit[236] 1> /dev/null 2>& 1 +epprd_rg:cl_telinit[239] : telinit a disabled +epprd_rg:cl_telinit[241] return 0 +epprd_rg:process_resources(20.217)[3324] true +epprd_rg:process_resources(20.217)[3326] : call rgpa,
and it will tell us what to do next +epprd_rg:process_resources(20.217)[3328] set -a +epprd_rg:process_resources(20.217)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T17:10:54.576241 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(20.230)[3329] eval JOB_TYPE=MOUNT_FILESYSTEMS ACTION=ACQUIRE FILE_SYSTEMS='"/board;/board_org"' RESOURCE_GROUPS='"epprd_rg' '"' NFS_NETWORKS='""' NFS_HOSTS='""' IP_LABELS='"epprd"' +epprd_rg:process_resources(20.230)[1] JOB_TYPE=MOUNT_FILESYSTEMS +epprd_rg:process_resources(20.230)[1] ACTION=ACQUIRE +epprd_rg:process_resources(20.230)[1] FILE_SYSTEMS='/board;/board_org' +epprd_rg:process_resources(20.230)[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources(20.230)[1] NFS_NETWORKS='' +epprd_rg:process_resources(20.230)[1] NFS_HOSTS='' +epprd_rg:process_resources(20.230)[1] IP_LABELS=epprd +epprd_rg:process_resources(20.230)[3330] RC=0 +epprd_rg:process_resources(20.230)[3331] set +a +epprd_rg:process_resources(20.230)[3333] (( 0 != 0 )) +epprd_rg:process_resources(20.230)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(20.230)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(20.230)[3343] export GROUPNAME +epprd_rg:process_resources(20.230)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(20.230)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(20.230)[3360] [[ MOUNT_FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources(20.230)[3360] [[ MOUNT_FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources(20.230)[3612] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources(20.230)[3614] mount_nfs_filesystems MOUNT +epprd_rg:process_resources(20.230)[mount_nfs_filesystems:1437] PS4_FUNC=mount_nfs_filesystems +epprd_rg:process_resources(20.230)[mount_nfs_filesystems:1437] typeset PS4_FUNC +epprd_rg:process_resources(20.230)[mount_nfs_filesystems:1438] [[ high == high ]] +epprd_rg:process_resources(20.230)[mount_nfs_filesystems:1438] set -x +epprd_rg:process_resources(20.230)[mount_nfs_filesystems:1440] post_event_member=FALSE +epprd_rg:process_resources(20.230)[mount_nfs_filesystems:1444] [[ epprda == epprda ]] +epprd_rg:process_resources(20.230)[mount_nfs_filesystems:1446] post_event_member=TRUE +epprd_rg:process_resources(20.230)[mount_nfs_filesystems:1447] break +epprd_rg:process_resources(20.230)[mount_nfs_filesystems:1452] : This node will not be in the resource group so do not mount filesystems. 
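
Each pass of the process_resources main loop above runs clRGPA, evals the shell assignments it prints (JOB_TYPE, ACTION and per-job variables such as FILE_SYSTEMS or IP_LABELS, auto-exported thanks to set -a), then dispatches on JOB_TYPE; the TELINIT and MOUNT_FILESYSTEMS jobs arrived this way. A sketch of the dispatcher (the terminating job type is not visible in this excerpt and is shown as NONE purely for illustration):

    while true
    do
        set -a                   # auto-export everything the eval assigns
        eval $(clRGPA)           # e.g. JOB_TYPE=MOUNT_FILESYSTEMS ACTION=ACQUIRE ...
        set +a
        [[ $JOB_TYPE == NONE ]] && break     # assumed terminator, not shown above
        case $JOB_TYPE in
        TELINIT)           cl_telinit ;;
        MOUNT_FILESYSTEMS) [[ $ACTION == ACQUIRE ]] && mount_nfs_filesystems MOUNT ;;
        esac
    done
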
+epprd_rg:process_resources(20.230)[mount_nfs_filesystems:1454] [[ TRUE == FALSE ]] +epprd_rg:process_resources(20.230)[mount_nfs_filesystems:1459] STAT=0 +epprd_rg:process_resources(20.230)[mount_nfs_filesystems:1463] export GROUPNAME +epprd_rg:process_resources(20.231)[mount_nfs_filesystems:1465] get_list_head '/board;/board_org' +epprd_rg:process_resources(20.231)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(20.231)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(20.231)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(20.231)[get_list_head:60] set -x +epprd_rg:process_resources(20.232)[get_list_head:61] echo '/board;/board_org' +epprd_rg:process_resources(20.234)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(20.234)[get_list_head:61] IFS=: +epprd_rg:process_resources(20.235)[get_list_head:62] echo '/board;/board_org' +epprd_rg:process_resources(20.237)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(20.234)[mount_nfs_filesystems:1465] read LIST_OF_FILE_SYSTEMS_FOR_RG +epprd_rg:process_resources(20.242)[mount_nfs_filesystems:1466] get_list_tail '/board;/board_org' +epprd_rg:process_resources(20.242)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(20.242)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(20.242)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(20.242)[get_list_tail:68] set -x +epprd_rg:process_resources(20.243)[get_list_tail:69] echo '/board;/board_org' +epprd_rg:process_resources(20.244)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(20.244)[get_list_tail:69] IFS=: +epprd_rg:process_resources(20.245)[get_list_tail:70] echo +epprd_rg:process_resources(20.246)[mount_nfs_filesystems:1466] read FILE_SYSTEMS +epprd_rg:process_resources(20.247)[mount_nfs_filesystems:1468] get_list_head +epprd_rg:process_resources(20.247)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(20.247)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(20.247)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(20.247)[get_list_head:60] set -x +epprd_rg:process_resources(20.248)[get_list_head:61] echo +epprd_rg:process_resources(20.250)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(20.250)[get_list_head:61] IFS=: +epprd_rg:process_resources(20.251)[get_list_head:62] echo +epprd_rg:process_resources(20.252)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(20.250)[mount_nfs_filesystems:1468] read NFS_HOST +epprd_rg:process_resources(20.256)[mount_nfs_filesystems:1469] get_list_tail +epprd_rg:process_resources(20.256)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(20.256)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(20.256)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(20.256)[get_list_tail:68] set -x +epprd_rg:process_resources(20.257)[get_list_tail:69] echo +epprd_rg:process_resources(20.260)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(20.261)[get_list_tail:69] IFS=: +epprd_rg:process_resources(20.261)[get_list_tail:70] echo +epprd_rg:process_resources(20.260)[mount_nfs_filesystems:1469] read NFS_HOSTS +epprd_rg:process_resources(20.263)[mount_nfs_filesystems:1471] get_list_head +epprd_rg:process_resources(20.264)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(20.264)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(20.264)[get_list_head:60] [[ high == high 
]] +epprd_rg:process_resources(20.264)[get_list_head:60] set -x +epprd_rg:process_resources(20.266)[get_list_head:61] echo +epprd_rg:process_resources(20.265)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(20.265)[get_list_head:61] IFS=: +epprd_rg:process_resources(20.268)[get_list_head:62] echo +epprd_rg:process_resources(20.269)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(20.265)[mount_nfs_filesystems:1471] read NFS_NETWORK +epprd_rg:process_resources(20.274)[mount_nfs_filesystems:1472] get_list_tail +epprd_rg:process_resources(20.274)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(20.274)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(20.274)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(20.274)[get_list_tail:68] set -x +epprd_rg:process_resources(20.276)[get_list_tail:69] echo +epprd_rg:process_resources(20.277)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(20.277)[get_list_tail:69] IFS=: +epprd_rg:process_resources(20.277)[get_list_tail:70] echo +epprd_rg:process_resources(20.275)[mount_nfs_filesystems:1472] read NFS_NETWORKS +epprd_rg:process_resources(20.280)[mount_nfs_filesystems:1474] get_list_head epprd +epprd_rg:process_resources(20.280)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(20.280)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(20.280)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(20.280)[get_list_head:60] set -x +epprd_rg:process_resources(20.281)[get_list_head:61] echo epprd +epprd_rg:process_resources(20.283)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(20.283)[get_list_head:61] IFS=: +epprd_rg:process_resources(20.284)[get_list_head:62] echo epprd +epprd_rg:process_resources(20.284)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(20.282)[mount_nfs_filesystems:1474] read LIST_OF_IP_LABELS_FOR_RG +epprd_rg:process_resources(20.288)[mount_nfs_filesystems:1475] get_list_tail epprd +epprd_rg:process_resources(20.289)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(20.289)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(20.289)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(20.289)[get_list_tail:68] set -x +epprd_rg:process_resources(20.290)[get_list_tail:69] echo epprd +epprd_rg:process_resources(20.293)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(20.293)[get_list_tail:69] IFS=: +epprd_rg:process_resources(20.293)[get_list_tail:70] echo +epprd_rg:process_resources(20.293)[mount_nfs_filesystems:1475] read IP_LABELS +epprd_rg:process_resources(20.294)[mount_nfs_filesystems:1477] MOUNT_FILESYSTEM='/board;/board_org' +epprd_rg:process_resources(20.294)[mount_nfs_filesystems:1478] NFSMOUNT_LABEL=epprd +epprd_rg:process_resources(20.294)[mount_nfs_filesystems:1481] : Do the required NFS_mounts. 
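The get_list_head / get_list_tail helpers traced above are tiny list splitters: they break their argument on the first colon and expand commas to spaces. Reconstructed from the trace records (process_resources lines 59-70), they behave roughly like this ksh sketch:

    function get_list_head {
        # Emit everything before the first colon, commas expanded to spaces.
        echo "$*" | IFS=: read listhead listtail
        echo "$listhead" | tr ',' ' '
    }

    function get_list_tail {
        # Emit everything after the first colon.
        echo "$*" | IFS=: read listhead listtail
        echo "$listtail"
    }

Callers consume the output with read, as the trace shows. For '/board;/board_org' there is no colon, so the head is the whole string and the tail is empty, which is why FILE_SYSTEMS comes back blank above.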
+epprd_rg:process_resources(20.294)[mount_nfs_filesystems:1484] NW_NFSMOUNT_LABEL='' +epprd_rg:process_resources(20.294)[mount_nfs_filesystems:1486] [[ -z '' ]] +epprd_rg:process_resources(20.294)[mount_nfs_filesystems:1488] NFS_HOST=epprda +epprd_rg:process_resources(20.294)[mount_nfs_filesystems:1491] NFSHOST='' +epprd_rg:process_resources(20.294)[mount_nfs_filesystems:1492] [[ -n epprda ]] +epprd_rg:process_resources(20.294)[mount_nfs_filesystems:1494] [[ -n '' ]] +epprd_rg:process_resources(20.294)[mount_nfs_filesystems:1516] [[ MOUNT == REMOUNT ]] +epprd_rg:process_resources(20.294)[mount_nfs_filesystems:1526] ping epprd 1024 1 +epprd_rg:process_resources(20.296)[mount_nfs_filesystems:1526] 1> /dev/null +epprd_rg:process_resources(20.299)[mount_nfs_filesystems:1528] NFSHOST=epprd +epprd_rg:process_resources(20.299)[mount_nfs_filesystems:1529] break +epprd_rg:process_resources(20.299)[mount_nfs_filesystems:1533] [[ -n epprd ]] +epprd_rg:process_resources(20.299)[mount_nfs_filesystems:1536] : activate_nfs will not wait for the mounts to complete +epprd_rg:process_resources(20.299)[mount_nfs_filesystems:1538] cl_activate_nfs 1 epprd '/board;/board_org' +epprd_rg:cl_activate_nfs[68] [[ high == high ]] +epprd_rg:cl_activate_nfs[68] version='1.19.4.2 $Source$' +epprd_rg:cl_activate_nfs[70] . /usr/es/sbin/cluster/events/utils/cl_nfs_utils +epprd_rg:cl_activate_nfs[98] PROGNAME=cl_activate_nfs +epprd_rg:cl_activate_nfs[99] [[ high == high ]] +epprd_rg:cl_activate_nfs[101] set -x +epprd_rg:cl_activate_nfs[102] version=%I +epprd_rg:cl_activate_nfs[105] cl_exports_data='' +epprd_rg:cl_activate_nfs[105] typeset cl_exports_data +epprd_rg:cl_activate_nfs[106] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[72] set -u +epprd_rg:cl_activate_nfs[242] grep -w ^MOUNT_WLMCNTRL_SELFMANAGE /etc/environment +epprd_rg:cl_activate_nfs[242] export eval +epprd_rg:cl_activate_nfs[244] (( 3 < 3 )) +epprd_rg:cl_activate_nfs[253] ATTEMPTS=1 +epprd_rg:cl_activate_nfs[253] typeset -li ATTEMPTS +epprd_rg:cl_activate_nfs[254] HOST=epprd +epprd_rg:cl_activate_nfs[256] shift 2 +epprd_rg:cl_activate_nfs[261] FILELIST='/board;/board_org' +epprd_rg:cl_activate_nfs[266] print '/board;/board_org' +epprd_rg:cl_activate_nfs[266] grep -q '\;/' +epprd_rg:cl_activate_nfs[271] CROSSMOUNTS=TRUE +epprd_rg:cl_activate_nfs[272] print '/board;/board_org' +epprd_rg:cl_activate_nfs[272] /bin/sort -k 1,1 '-t;' +epprd_rg:cl_activate_nfs[272] tr ' ' '\n' +epprd_rg:cl_activate_nfs[272] MOUNTLIST='/board;/board_org' +epprd_rg:cl_activate_nfs[281] ALLNFS=All_nfs_mounts +epprd_rg:cl_activate_nfs[282] cl_RMupdate resource_acquiring All_nfs_mounts cl_activate_nfs 2023-01-28T17:10:54.705184 2023-01-28T17:10:54.709404 +epprd_rg:cl_activate_nfs[288] odmget -q name='RECOVERY_METHOD AND group=epprd_rg' HACMPresource +epprd_rg:cl_activate_nfs[289] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_activate_nfs[288] METHOD=sequential +epprd_rg:cl_activate_nfs[291] odmget -q name='EXPORT_FILESYSTEM AND group=epprd_rg' HACMPresource +epprd_rg:cl_activate_nfs[291] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_activate_nfs[291] EXPORT_FILESYSTEM=/board_org +epprd_rg:cl_activate_nfs[293] odmget -q name='EXPORT_FILESYSTEM_V4 AND group=epprd_rg' HACMPresource +epprd_rg:cl_activate_nfs[293] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_activate_nfs[293] EXPORT_FILESYSTEM_V4='' +epprd_rg:cl_activate_nfs[302] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[304] [[ -z '' ]] 
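Here mount_nfs_filesystems selects the NFS server: the trace shows each candidate label probed with a single 1024-byte ping, keeping the first host that answers. A condensed sketch of that selection (variable names taken from the trace; the loop framing is assumed):

    NFSHOST=''
    for host in $NFSMOUNT_LABEL ; do
        # AIX ping arguments: <host> <packet size> <count>; one packet as a liveness probe
        if ping $host 1024 1 > /dev/null ; then
            NFSHOST=$host
            break
        fi
    done
    # : activate_nfs will not wait for the mounts to complete
    [[ -n $NFSHOST ]] && cl_activate_nfs 1 $NFSHOST '/board;/board_org'

cl_activate_nfs then recognizes the '/board;/board_org' pair as a crossmount (local mount point and remote filesystem separated by a semicolon) and sorts the list by mount point before mounting.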
+epprd_rg:cl_activate_nfs[305] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_activate_nfs[311] VERSION_SOURCE=DEFAULT +epprd_rg:cl_activate_nfs[320] [[ DEFAULT == FILES ]] +epprd_rg:cl_activate_nfs[377] [[ -x /usr/sbin/nfsrgyd ]] +epprd_rg:cl_activate_nfs[378] [[ -n '' ]] +epprd_rg:cl_activate_nfs[379] grep -q vers=4 /etc/filesystems +epprd_rg:cl_activate_nfs[394] [[ TRUE == TRUE ]] +epprd_rg:cl_activate_nfs[411] filesystem=/board_org +epprd_rg:cl_activate_nfs[412] mountpoint=/board +epprd_rg:cl_activate_nfs:/board;/board_org[429] PS4_LOOP='/board;/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[430] [[ sequential == sequential ]] +epprd_rg:cl_activate_nfs:/board;/board_org[432] nfs_mount 1 epprd /board_org /board +epprd_rg:cl_activate_nfs(0.081):/board;/board_org[nfs_mount:99] (( 4 != 4 )) +epprd_rg:cl_activate_nfs(0.081):/board;/board_org[nfs_mount:108] LIMIT=1 +epprd_rg:cl_activate_nfs(0.081):/board;/board_org[nfs_mount:108] typeset -li LIMIT +epprd_rg:cl_activate_nfs(0.081):/board;/board_org[nfs_mount:109] HOST=epprd +epprd_rg:cl_activate_nfs(0.081):/board;/board_org[nfs_mount:110] FileSystem=/board_org +epprd_rg:cl_activate_nfs(0.081):/board;/board_org[nfs_mount:111] MountPoint=/board +epprd_rg:cl_activate_nfs(0.082):/board;/board_org[nfs_mount:116] mount +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:119] [[ mounted == /board ]] +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:119] [[ --------------- == /board ]] +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:119] [[ procfs == /board ]] +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] 
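The run of read/test records above is nfs_mount scanning plain mount output to see whether /board is already mounted. The first two compares hit the header rows; after that, for NFS entries the third column is the local mount point, while for local filesystems the columns shift left so the third field is the vfs type (jfs2, procfs, ahafs), which can never match a path. Roughly (a sketch, not the literal source):

    mount | while read node node_fs lcl_mount rest ; do
        # Remote rows: node  remote-fs  mountpoint  vfs ...
        # Local rows:  fs    mountpoint vfs ...
        if [[ $lcl_mount == $MountPoint ]] ; then
            break    # hypothetical early exit; in this trace nothing matches
        fi
    done

After the scan, the options for /board are fetched with lsfs -c -v nfs and, because RECOVERY_METHOD is sequential, the trace below shows bg rewritten to fg and four extra attempts added to LIMIT before the mount is tried.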
+epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:119] [[ ahafs == /board ]] +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:119] [[ /sapcd == /board ]] +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.084):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:126] vers='' +epprd_rg:cl_activate_nfs(0.085):/board;/board_org[nfs_mount:127] [[ DEFAULT == ODM ]] +epprd_rg:cl_activate_nfs(0.086):/board;/board_org[nfs_mount:141] lsfs -c -v nfs +epprd_rg:cl_activate_nfs(0.089):/board;/board_org[nfs_mount:141] grep ^/board: +epprd_rg:cl_activate_nfs(0.091):/board;/board_org[nfs_mount:141] cut -d: -f7 +epprd_rg:cl_activate_nfs(0.094):/board;/board_org[nfs_mount:141] 
OPTIONS=bg,soft,intr,sec=sys,rw +epprd_rg:cl_activate_nfs(0.095):/board;/board_org[nfs_mount:142] echo bg,soft,intr,sec=sys,rw +epprd_rg:cl_activate_nfs(0.097):/board;/board_org[nfs_mount:142] sed s/+/:/g +epprd_rg:cl_activate_nfs(0.100):/board;/board_org[nfs_mount:142] OPTIONS=bg,soft,intr,sec=sys,rw +epprd_rg:cl_activate_nfs(0.100):/board;/board_org[nfs_mount:144] [[ -z bg,soft,intr,sec=sys,rw ]] +epprd_rg:cl_activate_nfs(0.100):/board;/board_org[nfs_mount:152] print bg,soft,intr,sec=sys,rw +epprd_rg:cl_activate_nfs(0.101):/board;/board_org[nfs_mount:152] grep -q intr +epprd_rg:cl_activate_nfs(0.104):/board;/board_org[nfs_mount:168] [[ -n '' ]] +epprd_rg:cl_activate_nfs(0.104):/board;/board_org[nfs_mount:175] [[ sequential == sequential ]] +epprd_rg:cl_activate_nfs(0.105):/board;/board_org[nfs_mount:177] print bg,soft,intr,sec=sys,rw +epprd_rg:cl_activate_nfs(0.107):/board;/board_org[nfs_mount:177] sed s/bg/fg/g +epprd_rg:cl_activate_nfs(0.109):/board;/board_org[nfs_mount:177] OPTIONS=fg,soft,intr,sec=sys,rw +epprd_rg:cl_activate_nfs(0.110):/board;/board_org[nfs_mount:178] let LIMIT+=4 +epprd_rg:cl_activate_nfs(0.110):/board;/board_org[nfs_mount:184] typeset RC +epprd_rg:cl_activate_nfs(0.110):/board;/board_org[nfs_mount:186] amlog_trace '' 'Activating NFS|/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:319] cltime +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:319] DATE=2023-01-28T17:10:54.788796 +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:320] echo '|2023-01-28T17:10:54.788796|INFO: Activating NFS|/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_nfs(0.138):/board;/board_org[nfs_mount:187] (( TRIES=0)) +epprd_rg:cl_activate_nfs(0.138):/board;/board_org[nfs_mount:187] (( TRIES<LIMIT )) [...] amlog_trace '' 'Activating NFS|/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:319] cltime +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:319] DATE=2023-01-28T17:10:54.861904 +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:320] echo '|2023-01-28T17:10:54.861904|INFO: Activating NFS|/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_nfs(0.211):/board;/board_org[nfs_mount:203] return 0 +epprd_rg:process_resources(20.515)[mount_nfs_filesystems:1540] RC=0 +epprd_rg:process_resources(20.515)[mount_nfs_filesystems:1541] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources(20.515)[mount_nfs_filesystems:1549] (( 0 != 0 )) +epprd_rg:process_resources(20.515)[mount_nfs_filesystems:1565] return 0 +epprd_rg:process_resources(20.515)[3324] true +epprd_rg:process_resources(20.515)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(20.515)[3328] set -a +epprd_rg:process_resources(20.516)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T17:10:54.874956 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(20.528)[3329] eval
JOB_TYPE=NONE +epprd_rg:process_resources(20.528)[1] JOB_TYPE=NONE +epprd_rg:process_resources(20.528)[3330] RC=0 +epprd_rg:process_resources(20.528)[3331] set +a +epprd_rg:process_resources(20.528)[3333] (( 0 != 0 )) +epprd_rg:process_resources(20.528)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(20.528)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(20.528)[3343] export GROUPNAME +epprd_rg:process_resources(20.528)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(20.528)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(20.528)[3360] [[ NONE == RELEASE ]] +epprd_rg:process_resources(20.528)[3360] [[ NONE == ONLINE ]] +epprd_rg:process_resources(20.528)[3729] break +epprd_rg:process_resources(20.528)[3740] : If sddsrv was turned off above, turn it back on again +epprd_rg:process_resources(20.529)[3742] [[ FALSE == TRUE ]] +epprd_rg:process_resources(20.529)[3747] exit 0 :rg_move[247] : unsetting AM_SYNC_CALLED_BY from $'callers environment as\n: we dont' require it after this point in execution. :rg_move[250] unset AM_SYNC_CALLED_BY :rg_move[253] [[ -f /tmp/.NFSSTOPPED ]] :rg_move[274] [[ -f /tmp/.RPCLOCKDSTOPPED ]] :rg_move[276] rm -f /tmp/.RPCLOCKDSTOPPED :rg_move[277] ATTEMPT=0 :rg_move[277] typeset -li ATTEMPT :rg_move[278] (( ATTEMPT++ < 60 )) :rg_move[280] : rpc.lockd status check :rg_move[281] lssrc -s rpc.lockd :rg_move[281] LC_ALL=C :rg_move[281] grep stopping :rg_move[282] (( 1 == 0 )) :rg_move[282] break :rg_move[285] startsrc -s rpc.lockd 0513-059 The rpc.lockd Subsystem has been started. Subsystem PID is 26804666. :rg_move[286] rcstartsrc=0 :rg_move[287] (( 0 != 0 )) :rg_move[293] exit 0 Jan 28 2023 17:10:54 EVENT COMPLETED: rg_move epprda 1 ACQUIRE 0 |2023-01-28T17:10:54|28698|EVENT COMPLETED: rg_move epprda 1 ACQUIRE 0| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T17:10:54.991446 :clevlog[amlog_trace:320] echo '|2023-01-28T17:10:54.991446|INFO: rg_move|epprd_rg|epprda|1|ACQUIRE|0' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :rg_move_acquire[+119] exit_status=0 :rg_move_acquire[+120] : exit status of clcallev rg_move epprda 1 ACQUIRE is: 0 :rg_move_acquire[+121] exit 0 Jan 28 2023 17:10:55 EVENT COMPLETED: rg_move_acquire epprda 1 0 |2023-01-28T17:10:55|28698|EVENT COMPLETED: rg_move_acquire epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T17:10:55.116376 + echo '|2023-01-28T17:10:55.116376|INFO: rg_move_acquire|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 17:10:55 EVENT START: rg_move_complete epprda 1 |2023-01-28T17:10:55|28698|EVENT START: rg_move_complete epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T17:10:55.310306 + echo '|2023-01-28T17:10:55.310306|INFO: rg_move_complete|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster 
matches a configured node, we are done. :get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move_complete[91] version=%I% :rg_move_complete[97] STATUS=0 :rg_move_complete[97] typeset -li STATUS :rg_move_complete[99] [[ -z '' ]] :rg_move_complete[101] EMULATE=REAL :rg_move_complete[104] set -u :rg_move_complete[106] (( 2 < 2 || 2 > 3 )) :rg_move_complete[112] NODENAME=epprda :rg_move_complete[112] export NODENAME :rg_move_complete[113] RGID=1 :rg_move_complete[114] (( 2 == 3 )) :rg_move_complete[118] RGDESTINATION='' :rg_move_complete[122] : serial number for this event is 28698 :rg_move_complete[126] : Interpret resource group ID into a resource group name. :rg_move_complete[128] clodmget -qid=1 -f group -n HACMPgroup :rg_move_complete[128] eval RGNAME=epprd_rg :rg_move_complete[1] RGNAME=epprd_rg +epprd_rg:rg_move_complete[129] GROUPNAME=epprd_rg +epprd_rg:rg_move_complete[131] UPDATESTATD=0 +epprd_rg:rg_move_complete[131] typeset -li UPDATESTATD +epprd_rg:rg_move_complete[132] NFSSTOPPED=0 +epprd_rg:rg_move_complete[132] typeset -li NFSSTOPPED +epprd_rg:rg_move_complete[133] LIMIT=60 +epprd_rg:rg_move_complete[133] WAIT=1 +epprd_rg:rg_move_complete[133] TRY=0 +epprd_rg:rg_move_complete[133] typeset -li LIMIT WAIT TRY +epprd_rg:rg_move_complete[136] : If this is a two node cluster and exported filesystems exist, then +epprd_rg:rg_move_complete[137] : when the cluster topology is stable notify rpc.statd of the changes. +epprd_rg:rg_move_complete[139] clnodename +epprd_rg:rg_move_complete[139] wc -l +epprd_rg:rg_move_complete[139] (( 2 == 2 )) +epprd_rg:rg_move_complete[141] clodmget -f group -n HACMPgroup +epprd_rg:rg_move_complete[141] RESOURCE_GROUPS=epprd_rg +epprd_rg:rg_move_complete[144] clodmget -q group='epprd_rg AND name=EXPORT_FILESYSTEM' -f value -n HACMPresource +epprd_rg:rg_move_complete[144] EXPORTLIST=/board_org +epprd_rg:rg_move_complete[146] [[ -n /board_org ]] +epprd_rg:rg_move_complete[146] [[ epprd_rg == epprd_rg ]] +epprd_rg:rg_move_complete[148] UPDATESTATD=1 +epprd_rg:rg_move_complete[149] [[ REAL == EMUL ]] +epprd_rg:rg_move_complete[154] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+198] cl_msg -e 0 -m 10744 %1$s[%2$d]: statd is not up on the local node \n cl_update_statd 198 :cl_msg[58] version=1.3.1.1 :cl_msg[68] getopts e:s:c:m: opt :cl_msg[74] : Error or standard message. 
0 is standard :cl_msg[75] MSG_TYPE=0 :cl_msg[68] getopts e:s:c:m: opt :cl_msg[78] : Message ID in given set and catalog :cl_msg[79] MSG_ID=10744 :cl_msg[68] getopts e:s:c:m: opt :cl_msg[87] shift 4 :cl_msg[89] : All the rest is the default message and data - '%1$s[%2$d]: statd is not up on the local node \n' cl_update_statd 198 :cl_msg[92] [[ -z '' ]] :cl_msg[94] MSG_CAT=scripts.cat :cl_msg[97] [[ -z 0 ]] :cl_msg[102] [[ -z 10744 ]] :cl_msg[107] SYSLOG_CONF='' :cl_msg[107] typeset SYSLOG_CONF :cl_msg[108] clgetsyslog :cl_msg[108] SYSLOG_CONF=/etc/syslog.conf :cl_msg[110] (( 0 != 0 )) :cl_msg[115] : Look up the message in the catalog :cl_msg[117] dspmsg scripts.cat 10744 '%1$s[%2$d]: statd is not up on the local node \n' cl_update_statd 198 :cl_msg[117] 2>& 1 :cl_msg[117] MSG='cl_update_statd[198]: statd is not up on the local node ' :cl_msg[120] : This is where we print out the parts of the message when we have :cl_msg[121] : an error. We also write to the syslog if it is configured. :cl_msg[123] (( 0 != 0 )) :cl_msg[152] print -u2 Jan 28 2023 17:10:55 'cl_update_statd[198]:' statd is not up on the local node Jan 28 2023 17:10:55 cl_update_statd[198]: statd is not up on the local node :cl_msg[155] : Finally, synchronize the syslog file but only if syslog is configured and :cl_msg[156] : the file exists. :cl_msg[158] [[ -n '' ]] :cl_msg[163] exit :cl_update_statd(0)[+200] : Attempt to recover this situation by restarting statd :cl_update_statd(0)[+202] startsrc -s rpc.statd 0513-059 The rpc.statd Subsystem has been started. Subsystem PID is 7864678. :cl_update_statd(0)[+203] sleep 5 :cl_update_statd(5)[+207] : Get the current twin, if there is one :cl_update_statd(5)[+209] :cl_update_statd(5)[+209] nfso -H sm_gethost :cl_update_statd(5)[+209] 2>& 1 CURTWIN= :cl_update_statd(5)[+210] RC=0 :cl_update_statd(5)[+212] [[ -z true ]] :cl_update_statd(5)[+212] [[ -z ]] :cl_update_statd(5)[+215] : Local node is no longer a cluster member, unregister its twin :cl_update_statd(5)[+215] [[ -n ]] :cl_update_statd(5)[+259] : RC is actually 0 :cl_update_statd(5)[+266] return 0 +epprd_rg:rg_move_complete[155] (( 0 != 0 )) +epprd_rg:rg_move_complete[160] break +epprd_rg:rg_move_complete[166] : Set the RESOURCE_GROUPS environment variable with the names +epprd_rg:rg_move_complete[167] : of all resource groups participating in this event, and export +epprd_rg:rg_move_complete[168] : them to all successive scripts. 
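cl_update_statd's recovery path above is straightforward: if lssrc reports statd inoperative, log catalog message 10744 through cl_msg, then restart the daemon and give it a moment before querying the twin with nfso. A minimal sketch of the traced sequence:

    if lssrc -s statd | LC_ALL=C grep -qw inoperative ; then
        # Message 10744 in scripts.cat: "statd is not up on the local node"
        cl_msg -e 0 -m 10744 '%1$s[%2$d]: statd is not up on the local node \n' cl_update_statd 198
        startsrc -s rpc.statd    # attempt to recover by restarting statd
        sleep 5                  # settle time before nfso -H sm_gethost
    fi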
+epprd_rg:rg_move_complete[170] set -a +epprd_rg:rg_move_complete[171] clsetenvgrp epprda rg_move_complete epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move_complete epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_complete[171] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_complete[172] RC=0 +epprd_rg:rg_move_complete[173] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_complete[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_complete[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_complete[3] HOMELESS_GROUPS='' +epprd_rg:rg_move_complete[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_complete[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_complete[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_complete[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_complete[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_complete[8] SIBLING_GROUPS='' +epprd_rg:rg_move_complete[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_complete[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_complete[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[174] set +a +epprd_rg:rg_move_complete[175] (( 0 != 0 )) +epprd_rg:rg_move_complete[182] : For each participating resource group, serially process the resources. +epprd_rg:rg_move_complete[251] (( 1 == 1 )) +epprd_rg:rg_move_complete[253] [[ REAL == EMUL ]] +epprd_rg:rg_move_complete[259] stopsrc -s rpc.lockd 0513-044 The rpc.lockd Subsystem was requested to stop. +epprd_rg:rg_move_complete[260] rcstopsrc=0 +epprd_rg:rg_move_complete[261] (( 0 != 0 )) +epprd_rg:rg_move_complete[266] (( TRY=0)) +epprd_rg:rg_move_complete[266] (( 0<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 1<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! 
-z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 2<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 3<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 4<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 5<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 6<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 7<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 8<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 9<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 10<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! 
-z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 11<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 12<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 13<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 14<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 15<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 16<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 17<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 18<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 19<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! 
-z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 20<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 21<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 22<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 23<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 24<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 25<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z '' ]] +epprd_rg:rg_move_complete[273] break +epprd_rg:rg_move_complete[277] [[ ! -z '' ]] +epprd_rg:rg_move_complete[300] : Sure that rpc.lockd stopped. Restart it. +epprd_rg:rg_move_complete[302] startsrc -s rpc.lockd 0513-059 The rpc.lockd Subsystem has been started. Subsystem PID is 26542484. +epprd_rg:rg_move_complete[303] rcstartsrc=0 +epprd_rg:rg_move_complete[304] (( 0 != 0 )) +epprd_rg:rg_move_complete[365] : If the resource group in this rg_move is now homeless, +epprd_rg:rg_move_complete[366] : then we need to put it into an error state. +epprd_rg:rg_move_complete[368] active_node=0 +epprd_rg:rg_move_complete[428] : If the resource group in this rg_move is now homeless_secondary, +epprd_rg:rg_move_complete[429] : then we need to put it into an errorsecondary state. +epprd_rg:rg_move_complete[437] : Set an error state for concurrent groups that have +epprd_rg:rg_move_complete[438] : been brought offline on this node by rg_move. 
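The two dozen near-identical blocks above are a single polling loop: rg_move_complete stops rpc.lockd, then samples lssrc once per second (LIMIT=60, WAIT=1, per the variables set at line 133) until the state column comes back empty, and finally restarts the subsystem. Condensed into a sketch:

    stopsrc -s rpc.lockd
    TRY=0
    while (( TRY++ < LIMIT )) ; do
        lssrc -s rpc.lockd | LC_ALL=C tail -1 | read name subsystem pid state
        [[ -z $state ]] && break    # empty state column means fully stopped ("stopping" while draining)
        sleep $WAIT
    done
    startsrc -s rpc.lockd           # restart; lockd returns with a fresh PID

Cycling lockd this way presumably forces it to pick up lock state for the filesystems this node has just acquired.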
+epprd_rg:rg_move_complete[453] AM_SYNC_CALLED_BY=RG_MOVE_COMPLETE +epprd_rg:rg_move_complete[453] export AM_SYNC_CALLED_BY +epprd_rg:rg_move_complete[454] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-01-28T17:11:25.661963 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=SYNC_VGS ACTION=ACQUIRE VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' :process_resources[1] JOB_TYPE=SYNC_VGS :process_resources[1] ACTION=ACQUIRE :process_resources[1] VOLUME_GROUPS=datavg :process_resources[1] RESOURCE_GROUPS='epprd_rg ' :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 )) :process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ SYNC_VGS == RELEASE ]] +epprd_rg:process_resources[3360] [[ SYNC_VGS == ONLINE ]] +epprd_rg:process_resources[3474] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[3476] sync_volume_groups +epprd_rg:process_resources[sync_volume_groups:2699] PS4_FUNC=sync_volume_groups +epprd_rg:process_resources[sync_volume_groups:2699] typeset PS4_FUNC +epprd_rg:process_resources[sync_volume_groups:2700] [[ high == high ]] +epprd_rg:process_resources[sync_volume_groups:2700] set -x +epprd_rg:process_resources[sync_volume_groups:2701] STAT=0 +epprd_rg:process_resources[sync_volume_groups:2704] export GROUPNAME +epprd_rg:process_resources[sync_volume_groups:2706] get_list_head datavg +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo datavg +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo datavg +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[sync_volume_groups:2706] read LIST_OF_VOLUME_GROUPS_FOR_RG +epprd_rg:process_resources[sync_volume_groups:2707] get_list_tail datavg +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo datavg +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[sync_volume_groups:2707] read VOLUME_GROUPS +epprd_rg:process_resources[sync_volume_groups:2710] : Sync the active volume groups +epprd_rg:process_resources[sync_volume_groups:2712] lsvg -L -o +epprd_rg:process_resources[sync_volume_groups:2712] sort 
+epprd_rg:process_resources[sync_volume_groups:2712] 2> /tmp/lsvg.err +epprd_rg:process_resources[sync_volume_groups:2712] 1> /tmp/lsvg.out.26804672 +epprd_rg:process_resources[sync_volume_groups:2713] echo datavg +epprd_rg:process_resources[sync_volume_groups:2713] tr ' ' '\n' +epprd_rg:process_resources[sync_volume_groups:2714] sort +epprd_rg:process_resources[sync_volume_groups:2714] comm -12 /tmp/lsvg.out.26804672 - +epprd_rg:process_resources[sync_volume_groups:2716] cl_sync_vgs datavg +epprd_rg:process_resources[sync_volume_groups:2718] [[ -s /tmp/lsvg.err ]] +epprd_rg:process_resources[sync_volume_groups:2723] rm -f /tmp/lsvg.out.26804672 /tmp/lsvg.err +epprd_rg:cl_sync_vgs[303] version=1.24.1.4 +epprd_rg:cl_sync_vgs[306] (( 1 == 0 )) +epprd_rg:cl_sync_vgs[312] : syncing 4 stale PPs at a time seems to be a win most of the time, but +epprd_rg:cl_sync_vgs[313] : we honor the NUM_PARALLEL_LPS value from /etc/environment, as does +epprd_rg:cl_sync_vgs[314] : syncvg. +epprd_rg:cl_sync_vgs[316] syncflag='' +epprd_rg:cl_sync_vgs[316] export syncflag +epprd_rg:cl_sync_vgs[317] PS4_LOOP='' +epprd_rg:cl_sync_vgs[317] export PS4_LOOP +epprd_rg:cl_sync_vgs[318] typeset -i npl +epprd_rg:cl_sync_vgs[319] grep -q ^NUM_PARALLEL_LPS= /etc/environment +epprd_rg:process_resources[sync_volume_groups:2732] unset AM_SYNC_CALLED_BY +epprd_rg:process_resources[sync_volume_groups:2734] return 0 +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:cl_sync_vgs[321] syncflag=-P4 +epprd_rg:cl_sync_vgs[328] echo 'NOTE: While the sync is going on, volume group can be used' NOTE: While the sync is going on, volume group can be used +epprd_rg:cl_sync_vgs[331] : For GLVM volume groups, read PARALLEL LPS count from HACMPresource if it is set from GUI, +epprd_rg:cl_sync_vgs[332] : else, read from environment variables, if it is not set use 32 as default value. 
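sync_volume_groups only resyncs groups that are both configured for the resource group and currently active: it intersects the sorted lsvg -L -o output with the RG's list via comm -12, then kicks off cl_sync_vgs for each match while the main loop returns to clRGPA, which is why the cl_sync_vgs records below interleave with process_resources. In outline (the background launch is inferred from that interleaving):

    lsvg -L -o 2> /tmp/lsvg.err | sort > /tmp/lsvg.out.$$
    echo $LIST_OF_VOLUME_GROUPS_FOR_RG | tr ' ' '\n' | sort |
        comm -12 /tmp/lsvg.out.$$ - |
        while read vg ; do
            cl_sync_vgs $vg &    # assumed background launch
        done
    [[ -s /tmp/lsvg.err ]] && : report lsvg errors here
    rm -f /tmp/lsvg.out.$$ /tmp/lsvg.err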
+epprd_rg:cl_sync_vgs[334] clodmget -q name='GMVG_REP_RESOURCE and value=datavg' -f group HACMPresource +epprd_rg:cl_sync_vgs[334] 2> /dev/null +epprd_rg:cl_sync_vgs[334] glvm_rg='' +epprd_rg:cl_sync_vgs[335] [[ -n '' ]] +epprd_rg:cl_sync_vgs[353] check_sync datavg +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:76] typeset vg_name +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:77] typeset vgid +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:78] typeset disklist +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:79] typeset lv_name +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:80] typeset -li stale_count +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:81] typeset -li mode +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:82] RC=0 +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:82] typeset -li RC +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:83] typeset site_node_list +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:84] typeset site_choice +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:86] vg_name=datavg +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:87] disklist='' +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:89] getlvodm -v datavg +epprd_rg:cl_sync_vgs(0.027):datavg[check_sync:89] vgid=00c44af100004b00000001851e9dc053 +epprd_rg:cl_sync_vgs(0.027):datavg[check_sync:92] : find disks in the VG that LVM thinks are inaccessable +epprd_rg:cl_sync_vgs(0.027):datavg[check_sync:94] lsvg -L -p datavg +epprd_rg:cl_sync_vgs(0.027):datavg[check_sync:94] LC_ALL=C +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T17:11:25.749060 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=APPLICATIONS ACTION=ACQUIRE ALL_APPLICATIONS='"epprd_app"' RESOURCE_GROUPS='"epprd_rg' '"' MISCDATA='""' +epprd_rg:process_resources[1] JOB_TYPE=APPLICATIONS +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] ALL_APPLICATIONS=epprd_app +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] MISCDATA='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ APPLICATIONS == RELEASE ]] +epprd_rg:process_resources[3360] [[ APPLICATIONS == ONLINE ]] +epprd_rg:process_resources[3549] process_applications ACQUIRE +epprd_rg:process_resources[process_applications:312] PS4_FUNC=process_applications +epprd_rg:process_resources[process_applications:312] typeset PS4_FUNC +epprd_rg:process_resources[process_applications:313] [[ high == high ]] +epprd_rg:process_resources[process_applications:313] set -x +epprd_rg:process_resources[process_applications:316] : Each subprocess will log to a file with this name and PID +epprd_rg:process_resources[process_applications:318] TMP_FILE=/var/hacmp/log/.process_resources_applications.26804672 +epprd_rg:process_resources[process_applications:318] export TMP_FILE +epprd_rg:process_resources[process_applications:320] rm -f '/var/hacmp/log/.process_resources_applications*' +epprd_rg:process_resources[process_applications:322] WAITPIDS='' 
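Every pass of process_resources follows the pattern visible throughout this log: ask clRGPA what to do next, eval the variable assignments it prints (JOB_TYPE, ACTION, resource lists) under set -a so they are exported, and dispatch on JOB_TYPE until NONE arrives. Schematically (the real dispatch table is much longer):

    while true ; do
        set -a                     # auto-export everything clRGPA defines
        eval $(clRGPA)             # e.g. JOB_TYPE=APPLICATIONS ACTION=ACQUIRE ...
        RC=$?
        set +a
        (( RC != 0 )) && break     # sketch of the error check at line 3333
        case $JOB_TYPE in
            NONE)         break ;;                                          # line 3729
            SYNC_VGS)     [[ $ACTION == ACQUIRE ]] && sync_volume_groups ;;
            APPLICATIONS) process_applications $ACTION ;;
            *)            : other job types elided ;;
        esac
    done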
+epprd_rg:process_resources[process_applications:323] LPAR_ACQUIRE_FAILED=0 +epprd_rg:process_resources[process_applications:324] LPAR_RELEASE_FAILED=0 +epprd_rg:process_resources[process_applications:325] START_STOP_FAILED=0 +epprd_rg:process_resources[process_applications:326] LIST_OF_APPS=epprd_app +epprd_rg:process_resources[process_applications:329] : Acquire lpar resources in one-shot before starting applications +epprd_rg:process_resources[process_applications:331] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[process_applications:333] GROUPNAME=epprd_rg +epprd_rg:process_resources[process_applications:333] export GROUPNAME +epprd_rg:process_resources[process_applications:334] clmanageroha -o acquire -s -l epprd_app +epprd_rg:process_resources[process_applications:334] 3>& 2 +epprd_rg:clmanageroha[318] : version='@(#)' 5881272 43haes/usr/sbin/cluster/events/clmanageroha.sh, 61aha_r726, 2205A_aha726, May 16 2022 12:15 PM +epprd_rg:clmanageroha[321] clodmget -n -f connection_type HACMPhmcparam +epprd_rg:clmanageroha[321] CONN_TYPE=0 +epprd_rg:clmanageroha[321] typeset -i CONN_TYPE +epprd_rg:clmanageroha[323] clodmget -q name='epprda and object like POWERVS_*' -nf name HACMPnode +epprd_rg:clmanageroha[323] 2> /dev/null +epprd_rg:clmanageroha[323] [[ -n '' ]] +epprd_rg:clmanageroha[326] export CONN_TYPE +epprd_rg:clmanageroha[331] roha_session_open -o acquire -s -l epprd_app +epprd_rg:clmanageroha[roha_session_open:131] roha_session.id=27001166 +epprd_rg:clmanageroha[roha_session_open:132] date +epprd_rg:clmanageroha[roha_session_open:132] LC_ALL=C +epprd_rg:clmanageroha[roha_session_open:132] roha_session_log 'Open session 27001166 at Sat Jan 28 17:11:25 KORST 2023' [ROHALOG:27001166:(0.071)] Open session 27001166 at Sat Jan 28 17:11:25 KORST 2023 +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:146] roha_session.operation=acquire +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:143] roha_session.systemmirror_mode=1 +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:149] roha_session.optimal_apps=epprd_app +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:163] [[ acquire != @(acquire|release|adjust) ]] +epprd_rg:clmanageroha[roha_session_open:168] no_roha_apps=0 +epprd_rg:clmanageroha[roha_session_open:168] typeset -i no_roha_apps +epprd_rg:clmanageroha[roha_session_open:169] need_explicit_res_rel=0 +epprd_rg:clmanageroha[roha_session_open:169] typeset -i need_explicit_res_rel +epprd_rg:clmanageroha[roha_session_open:187] [[ -n epprd_app ]] +epprd_rg:clmanageroha[roha_session_open:187] sort +epprd_rg:clmanageroha[roha_session_open:187] clmgr q roha +epprd_rg:clmanageroha[roha_session_open:187] uniq -d +epprd_rg:cl_sync_vgs(0.185):datavg[check_sync:94] disklist=$'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.192):datavg[check_sync:95] grep -w missing +epprd_rg:cl_sync_vgs(0.193):datavg[check_sync:95] print -- $'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 
199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.202):datavg[check_sync:95] cut -f1 '-d ' +epprd_rg:cl_sync_vgs(0.208):datavg[check_sync:95] missing_disklist='' +epprd_rg:cl_sync_vgs(0.210):datavg[check_sync:96] print -- $'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.215):datavg[check_sync:96] grep -w removed +epprd_rg:cl_sync_vgs(0.225):datavg[check_sync:96] cut -f1 '-d ' +epprd_rg:cl_sync_vgs(0.237):datavg[check_sync:96] removed_disklist='' +epprd_rg:cl_sync_vgs(0.237):datavg[check_sync:100] : Proceeed if there are some disks that LVM thinks are inaccessable +epprd_rg:cl_sync_vgs(0.237):datavg[check_sync:102] [[ -n '' ]] +epprd_rg:cl_sync_vgs(0.237):datavg[check_sync:102] [[ -n '' ]] +epprd_rg:cl_sync_vgs(0.237):datavg[check_sync:196] : sync if any LVs in the VG that have stale partitions +epprd_rg:cl_sync_vgs(0.237):datavg[check_sync:198] (( 0 == 0 )) +epprd_rg:cl_sync_vgs(0.237):datavg[check_sync:201] : A status of 2,3,5 or 7 indicates the presence of dirty and/or stale partitions +epprd_rg:cl_sync_vgs(0.237):datavg[check_sync:213] is_start_logged=0 +epprd_rg:cl_sync_vgs(0.237):datavg[check_sync:218] at_least_one_sync_success=0 +epprd_rg:cl_sync_vgs(0.238):datavg[check_sync:219] lqueryvg -g 00c44af100004b00000001851e9dc053 -L +epprd_rg:cl_sync_vgs(0.241):datavg[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.311):datavg[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.311):datavg.epprdaloglv[check_sync:221] PS4_LOOP=datavg.epprdaloglv +epprd_rg:cl_sync_vgs(0.311):datavg.epprdaloglv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.311):datavg.epprdaloglv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.311):datavg.epprdaloglv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.311):datavg.epprdaloglv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.311):datavg.epprdaloglv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.311):datavg.saplv[check_sync:221] PS4_LOOP=datavg.saplv +epprd_rg:cl_sync_vgs(0.311):datavg.saplv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.311):datavg.saplv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.311):datavg.saplv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.311):datavg.saplv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.311):datavg.saplv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.311):datavg.sapmntlv[check_sync:221] PS4_LOOP=datavg.sapmntlv +epprd_rg:cl_sync_vgs(0.311):datavg.sapmntlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.311):datavg.sapmntlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.311):datavg.sapmntlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.311):datavg.sapmntlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.311):datavg.sapmntlv[check_sync:221] [[ high == high ]] 
+epprd_rg:cl_sync_vgs(0.311):datavg.oraclelv[check_sync:221] PS4_LOOP=datavg.oraclelv +epprd_rg:cl_sync_vgs(0.311):datavg.oraclelv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.311):datavg.oraclelv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.311):datavg.oraclelv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.311):datavg.oraclelv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.311):datavg.oraclelv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.311):datavg.epplv[check_sync:221] PS4_LOOP=datavg.epplv +epprd_rg:cl_sync_vgs(0.311):datavg.epplv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.311):datavg.epplv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.311):datavg.epplv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.311):datavg.epplv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.311):datavg.epplv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.311):datavg.oraarchlv[check_sync:221] PS4_LOOP=datavg.oraarchlv +epprd_rg:cl_sync_vgs(0.311):datavg.oraarchlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.311):datavg.oraarchlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.311):datavg.oraarchlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.311):datavg.oraarchlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.311):datavg.oraarchlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata1lv[check_sync:221] PS4_LOOP=datavg.sapdata1lv +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata1lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata1lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata1lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata1lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata1lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata2lv[check_sync:221] PS4_LOOP=datavg.sapdata2lv +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata2lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata2lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata2lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata2lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata2lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata3lv[check_sync:221] PS4_LOOP=datavg.sapdata3lv +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata3lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata3lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata3lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata3lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata3lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata4lv[check_sync:221] PS4_LOOP=datavg.sapdata4lv +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata4lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata4lv[check_sync:225] : Anything else indicates no stale partitions 
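These per-LV records are check_sync reading the third field of lqueryvg -g <vgid> -L for every logical volume in datavg: a status of 2, 3, 5 or 7 means dirty and/or stale partitions, anything else is clean and the loop moves on, as it does for all of the LVs here. A sketch of the scan (the syncvg invocation is assumed from syncflag=-P4 set earlier; it never actually runs in this trace):

    lqueryvg -g $vgid -L | while read lv_id lv_name lv_status ; do
        PS4_LOOP=$vg_name.$lv_name
        if (( lv_status == 2 || lv_status == 3 || lv_status == 5 || lv_status == 7 )) ; then
            syncvg $syncflag -l $lv_name    # -P4: sync four stale LPs in parallel
        fi
    done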
+epprd_rg:cl_sync_vgs(0.312):datavg.sapdata4lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata4lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata4lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.312):datavg.boardlv[check_sync:221] PS4_LOOP=datavg.boardlv +epprd_rg:cl_sync_vgs(0.312):datavg.boardlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.312):datavg.boardlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.312):datavg.boardlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.312):datavg.boardlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.312):datavg.boardlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.312):datavg.origlogAlv[check_sync:221] PS4_LOOP=datavg.origlogAlv +epprd_rg:cl_sync_vgs(0.312):datavg.origlogAlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.312):datavg.origlogAlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.312):datavg.origlogAlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.312):datavg.origlogAlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.312):datavg.origlogAlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.312):datavg.origlogBlv[check_sync:221] PS4_LOOP=datavg.origlogBlv +epprd_rg:cl_sync_vgs(0.312):datavg.origlogBlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.312):datavg.origlogBlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.312):datavg.origlogBlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.312):datavg.origlogBlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.312):datavg.origlogBlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogAlv[check_sync:221] PS4_LOOP=datavg.mirrlogAlv +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogAlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogAlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogAlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogAlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogAlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogBlv[check_sync:221] PS4_LOOP=datavg.mirrlogBlv +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogBlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogBlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogBlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogBlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogBlv[check_sync:268] [[ -n RG_MOVE_COMPLETE ]] +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogBlv[check_sync:268] (( 0 == 1 )) +epprd_rg:cl_sync_vgs[355] exit 0 +epprd_rg:clmanageroha[roha_session_open:187] echo epprd_app +epprd_rg:clmanageroha[roha_session_open:187] sort -u +epprd_rg:clmanageroha[roha_session_open:187] echo '\nepprd_app' +epprd_rg:clmanageroha[roha_session_open:187] [[ -z '' ]] +epprd_rg:clmanageroha[roha_session_open:189] roha_session_log 'INFO: No ROHA configured on applications.\n' [ROHALOG:27001166:(0.551)] INFO: No ROHA configured on applications. 
[ROHALOG:27001166:(0.551)] +epprd_rg:clmanageroha[roha_session_open:190] no_roha_apps=1 +epprd_rg:clmanageroha[roha_session_open:195] read_tunables +epprd_rg:clmanageroha[roha_session_open:196] echo '' +epprd_rg:clmanageroha[roha_session_open:196] grep -q epprda +epprd_rg:clmanageroha[roha_session_open:197] (( 1 == 0 )) +epprd_rg:clmanageroha[roha_session_open:202] (( 1 == 1 )) +epprd_rg:clmanageroha[roha_session_open:203] roha_session_read_odm_dynresop DLPAR_MEM +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_MEM -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:203] (( 0 == 0.00 )) +epprd_rg:clmanageroha[roha_session_open:204] roha_session_read_odm_dynresop DLPAR_PROCS +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROCS -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:204] (( 0 == 0 )) +epprd_rg:clmanageroha[roha_session_open:205] roha_session_read_odm_dynresop DLPAR_PROC_UNITS +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROC_UNITS -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:205] (( 0 == 0.00 )) +epprd_rg:clmanageroha[roha_session_open:206] roha_session_log 'INFO: Nothing to be done.\n' [ROHALOG:27001166:(0.609)] INFO: Nothing to be done. 
[ROHALOG:27001166:(0.609)] +epprd_rg:clmanageroha[roha_session_open:207] exit 0 +epprd_rg:process_resources[process_applications:335] RC=0 +epprd_rg:process_resources[process_applications:336] (( 0 != 0 )) +epprd_rg:process_resources[process_applications:343] (( LPAR_ACQUIRE_FAILED == 0 )) +epprd_rg:process_resources[process_applications:345] : Loop through all groups to start or stop applications +epprd_rg:process_resources[process_applications:348] export GROUPNAME +epprd_rg:process_resources[process_applications:351] : Break out application data +epprd_rg:process_resources[process_applications:353] get_list_head epprd_app +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo epprd_app +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo epprd_app +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[process_applications:353] read LIST_OF_APPLICATIONS_FOR_RG +epprd_rg:process_resources[process_applications:354] get_list_tail epprd_app +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo epprd_app +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[process_applications:354] read ALL_APPLICATIONS +epprd_rg:process_resources[process_applications:356] get_list_head +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[process_applications:356] read MISCDATA_FOR_RG +epprd_rg:process_resources[process_applications:357] get_list_tail +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[process_applications:357] read MISCDATA +epprd_rg:process_resources[process_applications:359] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[process_applications:374] APPLICATIONS=epprd_app +epprd_rg:process_resources[process_applications:374] export APPLICATIONS +epprd_rg:process_resources[process_applications:375] MISC_DATA='' +epprd_rg:process_resources[process_applications:375] export MISC_DATA 
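The get_list_head/get_list_tail pair traced above implements the list convention process_resources relies on: clRGPA hands back one colon-separated field per resource group, with commas between items inside a single group's field. A minimal ksh sketch of that idiom, written as hypothetical standalone helpers (the real functions live inside process_resources):

    # Hypothetical standalone rewrite of the two list helpers.
    # Colons separate per-group fields; commas separate items within a group.
    function get_list_head
    {
        echo "$*" | IFS=: read listhead listtail   # split at the first colon
        echo "$listhead" | tr ',' ' '              # commas become spaces
    }

    function get_list_tail
    {
        echo "$*" | IFS=: read listhead listtail
        echo "$listtail"                           # the remaining groups, if any
    }

    get_list_head epprd_app:app2,app3   # prints: epprd_app   (app2, app3 are made-up names)
    get_list_tail epprd_app:app2,app3   # prints: app2,app3

With a single group in this event the tail is empty, which is why MISCDATA and the exported MISC_DATA above end up as empty strings.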
+epprd_rg:process_resources[process_applications:378] : Now call start_or_stop_applications_for_rg to do the app start/stop. +epprd_rg:process_resources[process_applications:384] : Add PID of the last bg start_or_stop_applications_for_rg process to WAITPIDS. +epprd_rg:process_resources[process_applications:386] WAITPIDS=' 27722126' +epprd_rg:process_resources[process_applications:390] : Wait for the start_or_stop_applications_for_rg PIDs to finish. +epprd_rg:process_resources[process_applications:393] wait 27722126 +epprd_rg:process_resources[process_applications:381] start_or_stop_applications_for_rg ACQUIRE /var/hacmp/log/.process_resources_applications.26804672.epprd_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:248] PS4_FUNC=start_or_stop_applications_for_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:248] typeset PS4_FUNC +epprd_rg:process_resources[start_or_stop_applications_for_rg:249] [[ high == high ]] +epprd_rg:process_resources[start_or_stop_applications_for_rg:249] set -x +epprd_rg:process_resources[start_or_stop_applications_for_rg:251] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[start_or_stop_applications_for_rg:253] cmd_to_execute=start_server +epprd_rg:process_resources[start_or_stop_applications_for_rg:259] : File name to store our exit status +epprd_rg:process_resources[start_or_stop_applications_for_rg:261] STATUS_FILE=/var/hacmp/log/.process_resources_applications.26804672.epprd_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:264] : Use clcallev to run the event +epprd_rg:process_resources[start_or_stop_applications_for_rg:266] clcallev start_server epprd_app Jan 28 2023 17:11:26 EVENT START: start_server epprd_app |2023-01-28T17:11:26|28698|EVENT START: start_server epprd_app| +epprd_rg:start_server[+206] version=%I% +epprd_rg:start_server[+210] export TMP_FILE=/var/hacmp/log/.start_server.27001176 +epprd_rg:start_server[+211] export DCD=/etc/es/objrepos +epprd_rg:start_server[+212] export ACD=/usr/es/sbin/cluster/etc/objrepos/active +epprd_rg:start_server[+214] rm -f /var/hacmp/log/.start_server.27001176 +epprd_rg:start_server[+216] STATUS=0 +epprd_rg:start_server[+220] PROC_RES=false +epprd_rg:start_server[+224] [[ APPLICATIONS != 0 ]] +epprd_rg:start_server[+224] [[ APPLICATIONS != GROUP ]] +epprd_rg:start_server[+225] PROC_RES=true +epprd_rg:start_server[+228] set -u +epprd_rg:start_server[+229] typeset WPARNAME EXEC WPARDIR +epprd_rg:start_server[+230] export WPARNAME EXEC WPARDIR +epprd_rg:start_server[+232] EXEC= +epprd_rg:start_server[+233] WPARNAME= +epprd_rg:start_server[+234] WPARDIR= +epprd_rg:start_server[+237] ALLSERVERS=All_servers +epprd_rg:start_server[+238] ALLNOERRSERV=All_nonerror_servers +epprd_rg:start_server[+239] cl_RMupdate resource_acquiring All_servers start_server 2023-01-28T17:11:26.505465 2023-01-28T17:11:26.509687 +epprd_rg:start_server[+241] +epprd_rg:start_server[+241] clwparname epprd_rg +epprd_rg:clwparname[38] version=1.3.1.1 +epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clwparname[44] [[ -z '' ]] +epprd_rg:clwparname[44] exit 0 WPARNAME= +epprd_rg:start_server[+243] (( 0 == 0 )) +epprd_rg:start_server[+243] [[ -n ]] +epprd_rg:start_server[+258] start_and_monitor_server epprd_app +epprd_rg:start_server[start_and_monitor_server+5] +epprd_rg:start_server[+261] wait RETURN_STATUS=0 +epprd_rg:start_server[start_and_monitor_server+7] server=epprd_app +epprd_rg:start_server[start_and_monitor_server+12] echo Checking whether epprd_app 
is already running...\n Checking whether epprd_app is already running... +epprd_rg:start_server[start_and_monitor_server+12] [[ -n ]] +epprd_rg:start_server[start_and_monitor_server+18] cl_app_startup_monitor -s epprd_app -a +epprd_rg:start_server[start_and_monitor_server+21] RETURN_STATUS=1 +epprd_rg:start_server[start_and_monitor_server+22] : exit status of cl_app_startup_monitor is: 1 +epprd_rg:start_server[start_and_monitor_server+22] [[ 1 == 0 ]] +epprd_rg:start_server[start_and_monitor_server+33] echo Application monitor(s) indicate that epprd_app is not active. Continuing with application startup.\n Application monitor(s) indicate that epprd_app is not active. Continuing with application startup. +epprd_rg:start_server[start_and_monitor_server+42] +epprd_rg:start_server[start_and_monitor_server+42] cllsserv -cn epprd_app +epprd_rg:start_server[start_and_monitor_server+42] cut -d: -f2 START=/etc/hacmp/epprd_start.sh +epprd_rg:start_server[start_and_monitor_server+43] +epprd_rg:start_server[start_and_monitor_server+43] echo /etc/hacmp/epprd_start.sh +epprd_rg:start_server[start_and_monitor_server+43] cut -d' ' -f1 START_SCRIPT=/etc/hacmp/epprd_start.sh +epprd_rg:start_server[start_and_monitor_server+44] +epprd_rg:start_server[start_and_monitor_server+44] cllsserv -cn epprd_app +epprd_rg:start_server[start_and_monitor_server+44] cut -d: -f4 START_MODE=background +epprd_rg:start_server[start_and_monitor_server+44] [[ -z background ]] +epprd_rg:start_server[start_and_monitor_server+47] PATTERN=epprda epprd_app +epprd_rg:start_server[start_and_monitor_server+48] RETURN_STATUS=0 +epprd_rg:start_server[start_and_monitor_server+51] amlog_trace '' 'Starting application controller in background|epprd_app' +epprd_rg:start_server[start_and_monitor_server+200] clcycle clavailability.log +epprd_rg:start_server[start_and_monitor_server+200] 1> /dev/null 2>& 1 +epprd_rg:start_server[start_and_monitor_server+200] +epprd_rg:start_server[start_and_monitor_server+200] cltime DATE=2023-01-28T17:11:26.560622 +epprd_rg:start_server[start_and_monitor_server+200] echo '|2023-01-28T17:11:26.560622|INFO: Starting application controller in background|epprd_app' +epprd_rg:start_server[start_and_monitor_server+200] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:start_server[start_and_monitor_server+51] [[ -n ]] +epprd_rg:start_server[start_and_monitor_server+51] [[ -z ]] +epprd_rg:start_server[start_and_monitor_server+51] [[ -x /etc/hacmp/epprd_start.sh ]] +epprd_rg:start_server[start_and_monitor_server+60] [ background == background ] +epprd_rg:start_server[start_and_monitor_server+62] date +epprd_rg:start_server[start_and_monitor_server+62] LC_ALL=C +epprd_rg:start_server[start_and_monitor_server+62] echo Running application controller start script for epprd_app in the background at Sat Jan 28 17:11:26 KORST 2023.\n Running application controller start script for epprd_app in the background at Sat Jan 28 17:11:26 KORST 2023. 
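For a background controller, start_and_monitor_server does not judge success by the start script's own exit code: it looks up the script and mode from the controller definition, launches the script without waiting, and lets the configured startup monitor decide whether the application came up. A rough ksh sketch of the sequence, assuming only the commands shown in the trace (STATUS_FILE stands in for the traced /var/hacmp/log/.start_server.<pid> prefix):

    SERVER=epprd_app
    STATUS_FILE=/var/hacmp/log/.start_server.$$        # stand-in for the traced temp file

    # Controller definition: field 2 holds the start script (plus any
    # arguments), field 4 the start mode.
    START_SCRIPT=$(cllsserv -cn $SERVER | cut -d: -f2 | cut -d' ' -f1)
    START_MODE=$(cllsserv -cn $SERVER | cut -d: -f4)

    cl_app_startup_monitor -s $SERVER -a                # rc 0: already active, skip the start
    if (( $? != 0 )) && [[ $START_MODE == background ]]
    then
        ODMDIR=/etc/es/objrepos $START_SCRIPT &         # fire and continue; do not wait
        cl_app_startup_monitor -s $SERVER               # the monitor, not the script, decides success
        echo "$SERVER $?" > $STATUS_FILE.$SERVER        # status file read back by start_server
    fi

start_server later cats the status file, cuts out the second field, and treats anything non-zero as a failed acquisition.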
+epprd_rg:start_server[start_and_monitor_server+63] /etc/hacmp/epprd_start.sh +epprd_rg:start_server[start_and_monitor_server+63] ODMDIR=/etc/es/objrepos +epprd_rg:start_server[start_and_monitor_server+62] [[ 0 != 0 ]] +epprd_rg:start_server[start_and_monitor_server+62] [[ -n ]] +epprd_rg:start_server[start_and_monitor_server+94] cl_app_startup_monitor -s epprd_app +epprd_rg:start_server[start_and_monitor_server+97] RETURN_STATUS=0 +epprd_rg:start_server[start_and_monitor_server+98] : exit status of cl_app_startup_monitor is: 0 +epprd_rg:start_server[start_and_monitor_server+98] [[ 0 != 0 ]] +epprd_rg:start_server[start_and_monitor_server+109] echo epprd_app 0 +epprd_rg:start_server[start_and_monitor_server+109] 1> /var/hacmp/log/.start_server.27001176.epprd_app +epprd_rg:start_server[start_and_monitor_server+112] +epprd_rg:start_server[start_and_monitor_server+112] cllsserv -cn epprd_app +epprd_rg:start_server[start_and_monitor_server+112] cut -d: -f4 START_MODE=background +epprd_rg:start_server[start_and_monitor_server+112] [[ background == foreground ]] +epprd_rg:start_server[start_and_monitor_server+132] return 0 +epprd_rg:start_server[+266] +epprd_rg:start_server[+266] cllsserv -cn epprd_app +epprd_rg:start_server[+266] cut -d: -f4 START_MODE=background +epprd_rg:start_server[+267] [ background == background ] +epprd_rg:start_server[+269] +epprd_rg:start_server[+269] cat /var/hacmp/log/.start_server.27001176.epprd_app +epprd_rg:start_server[+269] cut -f2 -d' ' SUCCESS=0 +epprd_rg:start_server[+269] [[ 0 != 0 ]] +epprd_rg:start_server[+274] amlog_trace '' 'Starting application controller in background|epprd_app' +epprd_rg:start_server[+200] clcycle clavailability.log +epprd_rg:start_server[+200] 1> /dev/null 2>& 1 +epprd_rg:start_server[+200] +epprd_rg:start_server[+200] cltime DATE=2023-01-28T17:11:26.605943 +epprd_rg:start_server[+200] echo '|2023-01-28T17:11:26.605943|INFO: Starting application controller in background|epprd_app' +epprd_rg:start_server[+200] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:start_server[+276] +epprd_rg:start_server[+276] clodmget -q 'name = epprd_app' -n -f cpu_usage_monitor HACMPserver MACTIVE=no +epprd_rg:start_server[+276] [[ no == yes ]] +epprd_rg:start_server[+292] +epprd_rg:start_server[+292] cat /var/hacmp/log/.start_server.27001176.epprd_app +epprd_rg:start_server[+292] cut -f2 -d' ' SUCCESS=0 +epprd_rg:start_server[+292] [[ 0 != +([0-9]) ]] +epprd_rg:start_server[+297] (( 0 != 0 )) +epprd_rg:start_server[+303] [[ 0 == 0 ]] +epprd_rg:start_server[+306] rm -f /var/hacmp/log/.start_server.27001176.epprd_app +epprd_rg:start_server[+308] cl_RMupdate resource_up All_nonerror_servers start_server 2023-01-28T17:11:26.635818 2023-01-28T17:11:26.639993 +epprd_rg:start_server[+314] exit 0 Jan 28 2023 17:11:26 EVENT COMPLETED: start_server epprd_app 0 |2023-01-28T17:11:26|28698|EVENT COMPLETED: start_server epprd_app 0| +epprd_rg:process_resources[start_or_stop_applications_for_rg:267] RC=0 +epprd_rg:process_resources[start_or_stop_applications_for_rg:269] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[start_or_stop_applications_for_rg:279] (( 0 != 0 )) +epprd_rg:process_resources[start_or_stop_applications_for_rg:291] : Store the result for later accumulation +epprd_rg:process_resources[start_or_stop_applications_for_rg:293] print 'epprd_rg 0' +epprd_rg:process_resources[start_or_stop_applications_for_rg:293] 1>> /var/hacmp/log/.process_resources_applications.26804672.epprd_rg +epprd_rg:process_resources[process_applications:396] : Look at all the status 
files to see if any were unsuccessful +epprd_rg:process_resources[process_applications:399] cat /var/hacmp/log/.process_resources_applications.26804672.epprd_rg +epprd_rg:process_resources[process_applications:399] read skip SUCCESS rest +epprd_rg:process_resources[process_applications:401] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:411] rm -f /var/hacmp/log/.process_resources_applications.26804672.epprd_rg +epprd_rg:process_resources[process_applications:416] : Release lpar resources in one-shot now that applications are stopped +epprd_rg:process_resources[process_applications:418] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[process_applications:433] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:434] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:435] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:439] return 0 +epprd_rg:process_resources[3550] RC=0 +epprd_rg:process_resources[3551] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T17:11:26.737941 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=ONLINE RESOURCE_GROUPS='"epprd_rg"' +epprd_rg:process_resources[1] JOB_TYPE=ONLINE +epprd_rg:process_resources[1] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ ONLINE == RELEASE ]] +epprd_rg:process_resources[3360] [[ ONLINE == ONLINE ]] +epprd_rg:process_resources[3363] INFO_STRING='' +epprd_rg:process_resources[3364] clnodename +epprd_rg:process_resources[3373] ENV_VAR=GROUP_epprd_rg_epprda +epprd_rg:process_resources[3374] eval 'echo $GROUP_epprd_rg_epprda' +epprd_rg:process_resources[1] echo WILLBEUPPOSTEVENT +epprd_rg:process_resources[3374] read ENV_VAR +epprd_rg:process_resources[3375] [[ WILLBEUPPOSTEVENT == WILLBEUPPOSTEVENT ]] +epprd_rg:process_resources[3376] INFO_STRING='|DESTINATION=epprda' +epprd_rg:process_resources[3377] IS_SERVICE_STOP=0 +epprd_rg:process_resources[3379] [[ WILLBEUPPOSTEVENT == ISUPPREEVENT ]] +epprd_rg:process_resources[3373] ENV_VAR=GROUP_epprd_rg_epprds +epprd_rg:process_resources[3374] eval 'echo $GROUP_epprd_rg_epprds' +epprd_rg:process_resources[1] echo +epprd_rg:process_resources[3374] read ENV_VAR +epprd_rg:process_resources[3375] [[ '' == WILLBEUPPOSTEVENT ]] +epprd_rg:process_resources[3379] [[ '' == ISUPPREEVENT ]] +epprd_rg:process_resources[3384] (( 1 == 0 && 0 ==0 )) +epprd_rg:process_resources[3673] set_resource_group_state UP +epprd_rg:process_resources[set_resource_group_state:82] PS4_FUNC=set_resource_group_state +epprd_rg:process_resources[set_resource_group_state:82] typeset PS4_FUNC +epprd_rg:process_resources[set_resource_group_state:83] [[ high == high ]] +epprd_rg:process_resources[set_resource_group_state:83] set -x 
+epprd_rg:process_resources[set_resource_group_state:84] STAT=0 +epprd_rg:process_resources[set_resource_group_state:85] new_status=UP +epprd_rg:process_resources[set_resource_group_state:89] export GROUPNAME +epprd_rg:process_resources[set_resource_group_state:90] [[ UP != DOWN ]] +epprd_rg:process_resources[set_resource_group_state:92] clchdaemons -d clstrmgr_scripts -t resource_locator -n epprda -o epprd_rg -v UP +epprd_rg:process_resources[set_resource_group_state:100] : Resource Manager Updates +epprd_rg:process_resources[set_resource_group_state:116] cl_RMupdate rg_up epprd_rg process_resources 2023-01-28T17:11:26.776941 2023-01-28T17:11:26.781190 +epprd_rg:process_resources[set_resource_group_state:118] amlog_trace '' 'acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:318] clcycle clavailability.log +epprd_rg:process_resources[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:process_resources[amlog_trace:319] cltime +epprd_rg:process_resources[amlog_trace:319] DATE=2023-01-28T17:11:26.811032 +epprd_rg:process_resources[amlog_trace:320] echo '|2023-01-28T17:11:26.811032|INFO: acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:process_resources[set_resource_group_state:153] return 0 +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T17:11:26.822862 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=NONE +epprd_rg:process_resources[1] JOB_TYPE=NONE +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ NONE == RELEASE ]] +epprd_rg:process_resources[3360] [[ NONE == ONLINE ]] +epprd_rg:process_resources[3729] break +epprd_rg:process_resources[3740] : If sddsrv was turned off above, turn it back on again +epprd_rg:process_resources[3742] [[ FALSE == TRUE ]] +epprd_rg:process_resources[3747] exit 0 +epprd_rg:rg_move_complete[455] STATUS=0 +epprd_rg:rg_move_complete[456] : The exit status of process_resources is: 0 +epprd_rg:rg_move_complete[461] unset AM_SYNC_CALLED_BY +epprd_rg:rg_move_complete[462] [[ TRUE == TRUE ]] +epprd_rg:rg_move_complete[491] [[ -z '' ]] +epprd_rg:rg_move_complete[493] RESOURCE_GROUPS=epprd_rg +epprd_rg:rg_move_complete[499] GROUPNAME=epprd_rg +epprd_rg:rg_move_complete[499] export GROUPNAME +epprd_rg:rg_move_complete[501] cl_rrmethods2call postrg_move +epprd_rg:cl_rrmethods2call[56] version=%I% +epprd_rg:cl_rrmethods2call[84] RRMETHODS='' +epprd_rg:cl_rrmethods2call[85] NEED_RR_ENV_VARS=no +epprd_rg:cl_rrmethods2call[124] NEED_RR_ENV_VARS=yes +epprd_rg:cl_rrmethods2call[129] : Set the '*_REP_RESOURCE' variables if needed. 
+epprd_rg:cl_rrmethods2call[131] [[ yes == yes ]] +epprd_rg:cl_rrmethods2call[133] cllsres +epprd_rg:cl_rrmethods2call[133] 2> /dev/null +epprd_rg:cl_rrmethods2call[133] eval APPLICATIONS='"epprd_app"' EXPORT_FILESYSTEM='"/board_org"' FILESYSTEM='""' FORCED_VARYON='"false"' FSCHECK_TOOL='"fsck"' FS_BEFORE_IPADDR='"false"' MOUNT_FILESYSTEM='"/board;/board_org"' RECOVERY_METHOD='"sequential"' SERVICE_LABEL='"epprd"' SSA_DISK_FENCING='"false"' VG_AUTO_IMPORT='"false"' VOLUME_GROUP='"datavg"' USERDEFINED_RESOURCES='""' +epprd_rg:cl_rrmethods2call[1] APPLICATIONS=epprd_app +epprd_rg:cl_rrmethods2call[1] EXPORT_FILESYSTEM=/board_org +epprd_rg:cl_rrmethods2call[1] FILESYSTEM='' +epprd_rg:cl_rrmethods2call[1] FORCED_VARYON=false +epprd_rg:cl_rrmethods2call[1] FSCHECK_TOOL=fsck +epprd_rg:cl_rrmethods2call[1] FS_BEFORE_IPADDR=false +epprd_rg:cl_rrmethods2call[1] MOUNT_FILESYSTEM='/board;/board_org' +epprd_rg:cl_rrmethods2call[1] RECOVERY_METHOD=sequential +epprd_rg:cl_rrmethods2call[1] SERVICE_LABEL=epprd +epprd_rg:cl_rrmethods2call[1] SSA_DISK_FENCING=false +epprd_rg:cl_rrmethods2call[1] VG_AUTO_IMPORT=false +epprd_rg:cl_rrmethods2call[1] VOLUME_GROUP=datavg +epprd_rg:cl_rrmethods2call[1] USERDEFINED_RESOURCES='' +epprd_rg:cl_rrmethods2call[137] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[142] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[147] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[152] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[157] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[162] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[167] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[172] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[182] [[ -z '' ]] +epprd_rg:cl_rrmethods2call[184] typeset sysmgdata +epprd_rg:cl_rrmethods2call[185] typeset reposmgdata +epprd_rg:cl_rrmethods2call[186] [[ -x /usr/es/sbin/cluster/xd_generic/xd_cli/clxd_list_mg_smit ]] +epprd_rg:cl_rrmethods2call[191] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[191] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[197] echo '' +epprd_rg:cl_rrmethods2call[199] return 0 +epprd_rg:rg_move_complete[501] METHODS='' +epprd_rg:rg_move_complete[516] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. 
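cl_rrmethods2call determines whether any replicated-resource recovery methods apply by loading the group's resource definitions straight into shell variables: cllsres prints NAME="value" assignments and a single eval materializes them all, after which each *_REP_RESOURCE variable is tested. Every test above came back empty, so METHODS='' and rg_move_complete has no postrg_move methods to run. The idiom, reduced to a ksh sketch (the variable and method names below are hypothetical placeholders; only the mechanism matches the trace):

    eval $(cllsres 2>/dev/null)      # sets APPLICATIONS="epprd_app", VOLUME_GROUP="datavg", ...

    RRMETHODS=""
    # One such test per replication type; SOME_REP_RESOURCE and
    # some_method are made-up names for illustration.
    [[ -n ${SOME_REP_RESOURCE:-} ]] && RRMETHODS="$RRMETHODS some_method"
    echo $RRMETHODS                  # empty here: no replicated resources configured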
+epprd_rg:rg_move_complete[518] exit 0 Jan 28 2023 17:11:26 EVENT COMPLETED: rg_move_complete epprda 1 0 |2023-01-28T17:11:26|28698|EVENT COMPLETED: rg_move_complete epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T17:11:26.935403 + echo '|2023-01-28T17:11:26.935403|INFO: rg_move_complete|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log PowerHA SystemMirror Event Summary ---------------------------------------------------------------------------- Serial number for this event: 28698 Event: TE_RG_MOVE_ACQUIRE Start time: Sat Jan 28 17:10:33 2023 End time: Sat Jan 28 17:11:26 2023 Action: Resource: Script Name: ---------------------------------------------------------------------------- Acquiring resource group: epprd_rg process_resources Search on: Sat.Jan.28.17:10:34.KORST.2023.process_resources.epprd_rg.ref Acquiring resource: All_service_addrs acquire_service_addr Search on: Sat.Jan.28.17:10:34.KORST.2023.acquire_service_addr.All_service_addrs.epprd_rg.ref Resource online: All_nonerror_service_addrs acquire_service_addr Search on: Sat.Jan.28.17:10:35.KORST.2023.acquire_service_addr.All_nonerror_service_addrs.epprd_rg.ref Acquiring resource: All_volume_groups cl_activate_vgs Search on: Sat.Jan.28.17:10:35.KORST.2023.cl_activate_vgs.All_volume_groups.epprd_rg.ref Resource online: All_nonerror_volume_groups cl_activate_vgs Search on: Sat.Jan.28.17:10:39.KORST.2023.cl_activate_vgs.All_nonerror_volume_groups.epprd_rg.ref Acquiring resource: All_filesystems cl_activate_fs Search on: Sat.Jan.28.17:10:40.KORST.2023.cl_activate_fs.All_filesystems.epprd_rg.ref Resource online: All_non_error_filesystems cl_activate_fs Search on: Sat.Jan.28.17:10:44.KORST.2023.cl_activate_fs.All_non_error_filesystems.epprd_rg.ref Acquiring resource: All_exports cl_export_fs Search on: Sat.Jan.28.17:10:54.KORST.2023.cl_export_fs.All_exports.epprd_rg.ref Resource online: All_nonerror_exports cl_export_fs Search on: Sat.Jan.28.17:10:54.KORST.2023.cl_export_fs.All_nonerror_exports.epprd_rg.ref Acquiring resource: All_nfs_mounts cl_activate_nfs Search on: Sat.Jan.28.17:10:54.KORST.2023.cl_activate_nfs.All_nfs_mounts.epprd_rg.ref Acquiring resource: All_servers start_server Search on: Sat.Jan.28.17:11:26.KORST.2023.start_server.All_servers.epprd_rg.ref Resource online: All_nonerror_servers start_server Search on: Sat.Jan.28.17:11:26.KORST.2023.start_server.All_nonerror_servers.epprd_rg.ref Resource group online: epprd_rg process_resources Search on: Sat.Jan.28.17:11:26.KORST.2023.process_resources.epprd_rg.ref ---------------------------------------------------------------------------- |EVENT_SUMMARY_START|TE_RG_MOVE_ACQUIRE|2023-01-28T17:10:33|2023-01-28T17:11:26|28698| |EV_SUM_SEARCHON_STR|Sat.Jan.28.17:10:34.KORST.2023.process_resources.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Jan.28.17:10:34.KORST.2023.acquire_service_addr.All_service_addrs.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Jan.28.17:10:35.KORST.2023.acquire_service_addr.All_nonerror_service_addrs.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Jan.28.17:10:35.KORST.2023.cl_activate_vgs.All_volume_groups.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Jan.28.17:10:39.KORST.2023.cl_activate_vgs.All_nonerror_volume_groups.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Jan.28.17:10:40.KORST.2023.cl_activate_fs.All_filesystems.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Jan.28.17:10:44.KORST.2023.cl_activate_fs.All_non_error_filesystems.epprd_rg.ref.ref| 
|EV_SUM_SEARCHON_STR|Sat.Jan.28.17:10:54.KORST.2023.cl_export_fs.All_exports.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Jan.28.17:10:54.KORST.2023.cl_export_fs.All_nonerror_exports.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Jan.28.17:10:54.KORST.2023.cl_activate_nfs.All_nfs_mounts.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Jan.28.17:11:26.KORST.2023.start_server.All_servers.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Jan.28.17:11:26.KORST.2023.start_server.All_nonerror_servers.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Jan.28.17:11:26.KORST.2023.process_resources.epprd_rg.ref.ref| |EVENT_SUMMARY_END| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 28699 No resource state change initiated by the cluster manager as a result of this event ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_JOIN_NODE_DEP_COMPLETE|2023-01-28T17:11:28|28699| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Jan 28 2023 17:11:29 EVENT START: node_up_complete epprda |2023-01-28T17:11:29|28699|EVENT START: node_up_complete epprda| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T17:11:29.142355 + echo '|2023-01-28T17:11:29.142355|INFO: node_up_complete|epprda' + 1>> /var/hacmp/availability/clavailability.log + version=%I% + set -a + cllsparam -n epprda + eval NODE_NAME=epprda VERBOSE_LOGGING=high PS4=$'\'${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO]' $'\'' DEBUG_LEVEL=Standard LC_ALL=$'\'C\'' + NODE_NAME=epprda + VERBOSE_LOGGING=high :node_up_complete[1] PS4='${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] ' :node_up_complete[1] DEBUG_LEVEL=Standard :node_up_complete[1] LC_ALL=C :node_up_complete[80] set +a :node_up_complete[82] NODENAME=epprda :node_up_complete[83] RC=0 :node_up_complete[83] typeset -i RC :node_up_complete[84] UPDATESTATD=0 :node_up_complete[84] typeset -i UPDATESTATD :node_up_complete[86] LPM_IN_PROGRESS_DIR=/var/hacmp/.lpm_in_progress :node_up_complete[86] typeset LPM_IN_PROGRESS_DIR :node_up_complete[87] LPM_IN_PROGRESS_PREFIX=lpm :node_up_complete[87] typeset LPM_IN_PROGRESS_PREFIX :node_up_complete[88] STATE_FILE=/var/hacmp/cl_dr.state :node_up_complete[88] typeset STATE_FILE :node_up_complete[97] STATUS=0 :node_up_complete[99] set -u :node_up_complete[101] (( 1 < 1 )) :node_up_complete[107] START_MODE='' :node_up_complete[107] typeset START_MODE :node_up_complete[108] (( 1 > 1 )) :node_up_complete[114] : serial number for this event is 28699 :node_up_complete[118] RPCLOCKDSTOPPED=0 :node_up_complete[118] typeset -i RPCLOCKDSTOPPED :node_up_complete[119] [[ -f /tmp/.RPCLOCKDSTOPPED ]] :node_up_complete[127] clnodename :node_up_complete[127] wc -l :node_up_complete[127] (( 2 == 2 )) :node_up_complete[129] clodmget -f group -n HACMPgroup :node_up_complete[129] RESOURCE_GROUPS=epprd_rg :node_up_complete[132] clodmget -q group='epprd_rg AND name=EXPORT_FILESYSTEM' -f value -n HACMPresource :node_up_complete[132] EXPORTLIST=/board_org :node_up_complete[133] [[ -n /board_org ]] :node_up_complete[135] UPDATESTATD=1 :node_up_complete[136] [[ epprda == epprda ]] :node_up_complete[139] lssrc -s rpc.statd :node_up_complete[139] LC_ALL=C :node_up_complete[139] grep inoperative :node_up_complete[140] (( 
1 == 0 )) :node_up_complete[146] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+196] rpcinfo -p :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw status :cl_update_statd(0)[+207] : Get the current twin, if there is one :cl_update_statd(0)[+209] :cl_update_statd(0)[+209] nfso -H sm_gethost :cl_update_statd(0)[+209] 2>& 1 CURTWIN= :cl_update_statd(0)[+210] RC=0 :cl_update_statd(0)[+212] [[ -z true ]] :cl_update_statd(0)[+212] [[ -z ]] :cl_update_statd(0)[+215] : Local node is no longer a cluster member, unregister its twin :cl_update_statd(0)[+215] [[ -n ]] :cl_update_statd(0)[+259] : RC is actually 0 :cl_update_statd(0)[+266] return 0 :node_up_complete[147] (( 0 )) :node_up_complete[151] break :node_up_complete[156] (( 1 )) :node_up_complete[158] (( 0 )) :node_up_complete[198] [[ TRUE == FALSE ]] :node_up_complete[268] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. :node_up_complete[270] : This is the final clRGinfo output :node_up_complete[272] clRGinfo -p -t :node_up_complete[272] 2>& 1 clRGinfo[431]: version I clRGinfo[517]: Number of resource groups = 0 clRGinfo[562]: cluster epprda_cluster is version = 22 clRGinfo[597]: no resource groups specified on command line - print all clRGinfo[685]: Current group is 'epprd_rg' get primary state info for state 6 get secondary state info for state 6 getPrimaryStateStr: using primary_table => primary_state_table get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_table Cluster Name: epprda_cluster Resource Group Name: epprd_rg Node Group State Delayed Timers ---------------------------------------------------------------- --------------- ------------------- epprda ONLINE epprds OFFLINE :node_up_complete[277] (( 0 == 0 )) :node_up_complete[279] [[ epprda != epprda ]] :node_up_complete[300] exit 0 Jan 28 2023 17:11:29 EVENT COMPLETED: node_up_complete epprda 0 |2023-01-28T17:11:29|28699|EVENT COMPLETED: node_up_complete epprda 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T17:11:29.342396 + echo '|2023-01-28T17:11:29.342396|INFO: node_up_complete|epprda|0' + 1>> /var/hacmp/availability/clavailability.log PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 22166 Cluster services started on node 'epprds' Node Up Completion Event has been enqueued. 
---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_JOIN_NODE_DEP|2023-01-28T17:11:33|22166| |NODE_UP_COMPLETE| |EVENT_PREAMBLE_END| Jan 28 2023 17:11:35 EVENT START: node_up epprds |2023-01-28T17:11:35|22166|EVENT START: node_up epprds| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T17:11:36.014717 + echo '|2023-01-28T17:11:36.014717|INFO: node_up|epprds' + 1>> /var/hacmp/availability/clavailability.log :node_up[182] version=%I% :node_up[185] NODENAME=epprds :node_up[185] export NODENAME :node_up[193] STATUS=0 :node_up[193] typeset -li STATUS :node_up[194] RC=0 :node_up[194] typeset -li RC :node_up[195] ENABLE_NFS_CROSS_MOUNT=false :node_up[196] START_MODE='' :node_up[196] typeset START_MODE :node_up[198] set -u :node_up[200] (( 1 < 1 )) :node_up[200] (( 1 > 2 )) :node_up[207] : serial number for this event is 22166 :node_up[210] [[ epprda == epprds ]] :node_up[219] (( 1 > 1 )) :node_up[256] : If RG_DEPENDENCIES=false, process RGs with clsetenvgrp :node_up[258] [[ TRUE == FALSE ]] :node_up[281] : localnode processing prior to RG acquisition :node_up[283] [[ epprda == epprds ]] :node_up[498] : Enable NFS crossmounts during manual start :node_up[500] [[ -n false ]] :node_up[500] [[ false == true ]] :node_up[607] : When RG dependencies are not configured we call node_up_local/remote, :node_up[608] : followed by process_resources to process any remaining groups :node_up[610] [[ TRUE == FALSE ]] :node_up[657] [[ epprda == epprds ]] :node_up[667] return 0 Jan 28 2023 17:11:36 EVENT COMPLETED: node_up epprds 0 |2023-01-28T17:11:36|22166|EVENT COMPLETED: node_up epprds 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T17:11:36.138887 + echo '|2023-01-28T17:11:36.138887|INFO: node_up|epprds|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 17:11:39 EVENT START: rg_move_fence epprds 1 |2023-01-28T17:11:39|22167|EVENT START: rg_move_fence epprds 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T17:11:39.583928 + echo '|2023-01-28T17:11:39.583928|INFO: rg_move_fence|epprd_rg|epprds|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_fence[62] [[ high == high ]] :rg_move_fence[62] version=1.11 :rg_move_fence[63] NODENAME=epprds :rg_move_fence[63] export NODENAME :rg_move_fence[65] set -u :rg_move_fence[67] [ 2 != 2 ] :rg_move_fence[73] set +u :rg_move_fence[75] [[ -z TRUE ]] :rg_move_fence[80] [[ TRUE == TRUE ]] :rg_move_fence[82] LOCAL_NODENAME=epprda :rg_move_fence[83] odmget -qid=1 HACMPgroup :rg_move_fence[83] egrep 'group =' :rg_move_fence[83] awk '{print $3}' :rg_move_fence[83] eval RGNAME='"epprd_rg"' :rg_move_fence[1] RGNAME=epprd_rg +epprd_rg:rg_move_fence[84] GROUPNAME=epprd_rg +epprd_rg:rg_move_fence[85] group_state='$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[86] set +u +epprd_rg:rg_move_fence[87] eval print '$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[1] print ONLINE +epprd_rg:rg_move_fence[87] RG_MOVE_ONLINE=ONLINE +epprd_rg:rg_move_fence[87] export RG_MOVE_ONLINE +epprd_rg:rg_move_fence[88] set -u +epprd_rg:rg_move_fence[89] RG_MOVE_ONLINE=ONLINE +epprd_rg:rg_move_fence[91] set -a +epprd_rg:rg_move_fence[92] clsetenvgrp epprda rg_move epprd_rg '' :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp 
completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_fence[92] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[93] RC=0 +epprd_rg:rg_move_fence[94] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_fence[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_fence[3] HOMELESS_GROUPS='' +epprd_rg:rg_move_fence[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_fence[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_fence[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_fence[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_fence[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_fence[8] SIBLING_GROUPS='' +epprd_rg:rg_move_fence[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_fence[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_fence[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[95] set +a +epprd_rg:rg_move_fence[96] [ 0 -ne 0 ] +epprd_rg:rg_move_fence[103] process_resources FENCE :rg_move_fence[3318] version=1.169 :rg_move_fence[3321] STATUS=0 :rg_move_fence[3322] sddsrv_off=FALSE :rg_move_fence[3324] true :rg_move_fence[3326] : call rgpa, and it will tell us what to do next :rg_move_fence[3328] set -a :rg_move_fence[3329] clRGPA FENCE :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa FENCE 2023-01-28T17:11:39.687207 clrgpa :clRGPA[+55] exit 0 :rg_move_fence[3329] eval JOB_TYPE=NONE :rg_move_fence[1] JOB_TYPE=NONE :rg_move_fence[3330] RC=0 :rg_move_fence[3331] set +a :rg_move_fence[3333] (( 0 != 0 )) :rg_move_fence[3342] RESOURCE_GROUPS='' :rg_move_fence[3343] GROUPNAME='' :rg_move_fence[3343] export GROUPNAME :rg_move_fence[3353] IS_SERVICE_START=1 :rg_move_fence[3354] IS_SERVICE_STOP=1 :rg_move_fence[3360] [[ NONE == RELEASE ]] :rg_move_fence[3360] [[ NONE == ONLINE ]] :rg_move_fence[3729] break :rg_move_fence[3740] : If sddsrv was turned off above, turn it back on again :rg_move_fence[3742] [[ FALSE == TRUE ]] :rg_move_fence[3747] exit 0 +epprd_rg:rg_move_fence[104] : exit status of process_resources FENCE is: 0 +epprd_rg:rg_move_fence[107] [[ TRUE == TRUE ]] +epprd_rg:rg_move_fence[109] export EVENT_TYPE +epprd_rg:rg_move_fence[110] echo ACQUIRE_PRIMARY_NFS ACQUIRE_PRIMARY_NFS +epprd_rg:rg_move_fence[111] [[ -n '' ]] +epprd_rg:rg_move_fence[141] exit 0 Jan 28 2023 17:11:39 EVENT COMPLETED: rg_move_fence epprds 1 0 |2023-01-28T17:11:39|22167|EVENT COMPLETED: rg_move_fence epprds 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T17:11:39.779807 + echo '|2023-01-28T17:11:39.779807|INFO: rg_move_fence|epprd_rg|epprds|1|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 17:11:39 EVENT 
START: rg_move_acquire epprds 1 |2023-01-28T17:11:39|22167|EVENT START: rg_move_acquire epprds 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T17:11:39.983945 + echo '|2023-01-28T17:11:39.983945|INFO: rg_move_acquire|epprd_rg|epprds|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_acquire[+54] [[ high == high ]] :rg_move_acquire[+54] version=1.9.1.7 :rg_move_acquire[+57] set -u :rg_move_acquire[+59] [ 2 != 2 ] :rg_move_acquire[+65] set +u :rg_move_acquire[+67] :rg_move_acquire[+67] clodmget -n -q id=1 -f group HACMPgroup RG=epprd_rg :rg_move_acquire[+68] export RG :rg_move_acquire[+70] [[ ACQUIRE_PRIMARY_NFS == ACQUIRE_PRIMARY ]] :rg_move_acquire[+118] clcallev rg_move epprds 1 ACQUIRE Jan 28 2023 17:11:40 EVENT START: rg_move epprds 1 ACQUIRE |2023-01-28T17:11:40|22167|EVENT START: rg_move epprds 1 ACQUIRE| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T17:11:40.110533 :clevlog[amlog_trace:320] echo '|2023-01-28T17:11:40.110533|INFO: rg_move|epprd_rg|epprds|1|ACQUIRE' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. :get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move[76] version=%I% :rg_move[86] STATUS=0 :rg_move[88] [[ ! 
-n '' ]] :rg_move[90] EMULATE=REAL :rg_move[96] set -u :rg_move[98] NODENAME=epprds :rg_move[98] export NODENAME :rg_move[99] RGID=1 :rg_move[100] (( 3 == 3 )) :rg_move[102] ACTION=ACQUIRE :rg_move[108] : serial number for this event is 22167 :rg_move[112] RG_UP_POSTEVENT_ON_NODE=epprds :rg_move[112] export RG_UP_POSTEVENT_ON_NODE :rg_move[116] clodmget -qid=1 -f group -n HACMPgroup :rg_move[116] eval RGNAME=epprd_rg :rg_move[1] RGNAME=epprd_rg :rg_move[118] UPDATESTATD=0 :rg_move[119] export UPDATESTATD :rg_move[123] RG_MOVE_EVENT=true :rg_move[123] export RG_MOVE_EVENT :rg_move[128] group_state='$RESGRP_epprd_rg_epprda' :rg_move[129] set +u :rg_move[130] eval print '$RESGRP_epprd_rg_epprda' :rg_move[1] print ONLINE :rg_move[130] RG_MOVE_ONLINE=ONLINE :rg_move[130] export RG_MOVE_ONLINE :rg_move[131] set -u :rg_move[132] RG_MOVE_ONLINE=ONLINE :rg_move[139] rm -f /tmp/.NFSSTOPPED :rg_move[140] rm -f /tmp/.RPCLOCKDSTOPPED :rg_move[147] set -a :rg_move[148] clsetenvgrp epprds rg_move epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprds rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 :rg_move[148] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[149] RC=0 :rg_move[150] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[1] FORCEDOWN_GROUPS='' :rg_move[2] RESOURCE_GROUPS='' :rg_move[3] HOMELESS_GROUPS='' :rg_move[4] HOMELESS_FOLLOWER_GROUPS='' :rg_move[5] ERRSTATE_GROUPS='' :rg_move[6] PRINCIPAL_ACTIONS='' :rg_move[7] ASSOCIATE_ACTIONS='' :rg_move[8] AUXILLIARY_ACTIONS='' :rg_move[8] SIBLING_GROUPS='' :rg_move[9] SIBLING_NODES_BY_GROUP='' :rg_move[10] SIBLING_ACQUIRING_GROUPS='' :rg_move[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' :rg_move[12] SIBLING_RELEASING_GROUPS='' :rg_move[13] SIBLING_RELEASING_NODES_BY_GROUP='' :rg_move[151] set +a :rg_move[155] (( 0 != 0 )) :rg_move[155] [[ -z epprd_rg ]] :rg_move[164] [[ -z TRUE ]] :rg_move[241] AM_SYNC_CALLED_BY=RG_MOVE :rg_move[241] export AM_SYNC_CALLED_BY :rg_move[242] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-01-28T17:11:40.231967 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=NONE :process_resources[1] JOB_TYPE=NONE :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 
)) :process_resources[3342] RESOURCE_GROUPS='' :process_resources[3343] GROUPNAME='' :process_resources[3343] export GROUPNAME :process_resources[3353] IS_SERVICE_START=1 :process_resources[3354] IS_SERVICE_STOP=1 :process_resources[3360] [[ NONE == RELEASE ]] :process_resources[3360] [[ NONE == ONLINE ]] :process_resources[3729] break :process_resources[3740] : If sddsrv was turned off above, turn it back on again :process_resources[3742] [[ FALSE == TRUE ]] :process_resources[3747] exit 0 :rg_move[247] : unsetting AM_SYNC_CALLED_BY from $'callers environment as\n: we dont' require it after this point in execution. :rg_move[250] unset AM_SYNC_CALLED_BY :rg_move[253] [[ -f /tmp/.NFSSTOPPED ]] :rg_move[274] [[ -f /tmp/.RPCLOCKDSTOPPED ]] :rg_move[293] exit 0 Jan 28 2023 17:11:40 EVENT COMPLETED: rg_move epprds 1 ACQUIRE 0 |2023-01-28T17:11:40|22167|EVENT COMPLETED: rg_move epprds 1 ACQUIRE 0| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T17:11:40.363068 :clevlog[amlog_trace:320] echo '|2023-01-28T17:11:40.363068|INFO: rg_move|epprd_rg|epprds|1|ACQUIRE|0' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :rg_move_acquire[+119] exit_status=0 :rg_move_acquire[+120] : exit status of clcallev rg_move epprds 1 ACQUIRE is: 0 :rg_move_acquire[+121] exit 0 Jan 28 2023 17:11:40 EVENT COMPLETED: rg_move_acquire epprds 1 0 |2023-01-28T17:11:40|22167|EVENT COMPLETED: rg_move_acquire epprds 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T17:11:40.483272 + echo '|2023-01-28T17:11:40.483272|INFO: rg_move_acquire|epprd_rg|epprds|1|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 17:11:40 EVENT START: rg_move_complete epprds 1 |2023-01-28T17:11:40|22167|EVENT START: rg_move_complete epprds 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T17:11:40.664809 + echo '|2023-01-28T17:11:40.664809|INFO: rg_move_complete|epprd_rg|epprds|1' + 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. :get_local_nodename[63] grep -w epprda :get_local_nodename[63] clnodename :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move_complete[91] version=%I% :rg_move_complete[97] STATUS=0 :rg_move_complete[97] typeset -li STATUS :rg_move_complete[99] [[ -z '' ]] :rg_move_complete[101] EMULATE=REAL :rg_move_complete[104] set -u :rg_move_complete[106] (( 2 < 2 || 2 > 3 )) :rg_move_complete[112] NODENAME=epprds :rg_move_complete[112] export NODENAME :rg_move_complete[113] RGID=1 :rg_move_complete[114] (( 2 == 3 )) :rg_move_complete[118] RGDESTINATION='' :rg_move_complete[122] : serial number for this event is 22167 :rg_move_complete[126] : Interpret resource group ID into a resource group name. 
:rg_move_complete[128] clodmget -qid=1 -f group -n HACMPgroup :rg_move_complete[128] eval RGNAME=epprd_rg :rg_move_complete[1] RGNAME=epprd_rg +epprd_rg:rg_move_complete[129] GROUPNAME=epprd_rg +epprd_rg:rg_move_complete[131] UPDATESTATD=0 +epprd_rg:rg_move_complete[131] typeset -li UPDATESTATD +epprd_rg:rg_move_complete[132] NFSSTOPPED=0 +epprd_rg:rg_move_complete[132] typeset -li NFSSTOPPED +epprd_rg:rg_move_complete[133] LIMIT=60 +epprd_rg:rg_move_complete[133] WAIT=1 +epprd_rg:rg_move_complete[133] TRY=0 +epprd_rg:rg_move_complete[133] typeset -li LIMIT WAIT TRY +epprd_rg:rg_move_complete[136] : If this is a two node cluster and exported filesystems exist, then +epprd_rg:rg_move_complete[137] : when the cluster topology is stable notify rpc.statd of the changes. +epprd_rg:rg_move_complete[139] wc -l +epprd_rg:rg_move_complete[139] clnodename +epprd_rg:rg_move_complete[139] (( 2 == 2 )) +epprd_rg:rg_move_complete[141] clodmget -f group -n HACMPgroup +epprd_rg:rg_move_complete[141] RESOURCE_GROUPS=epprd_rg +epprd_rg:rg_move_complete[144] clodmget -q group='epprd_rg AND name=EXPORT_FILESYSTEM' -f value -n HACMPresource +epprd_rg:rg_move_complete[144] EXPORTLIST=/board_org +epprd_rg:rg_move_complete[146] [[ -n /board_org ]] +epprd_rg:rg_move_complete[146] [[ epprd_rg == epprd_rg ]] +epprd_rg:rg_move_complete[148] UPDATESTATD=1 +epprd_rg:rg_move_complete[149] [[ REAL == EMUL ]] +epprd_rg:rg_move_complete[154] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+189] TWIN_NAME=epprds :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw status :cl_update_statd(0)[+196] rpcinfo -p :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+207] : Get the current twin, if there is one :cl_update_statd(0)[+209] :cl_update_statd(0)[+209] nfso -H sm_gethost :cl_update_statd(0)[+209] 2>& 1 CURTWIN= :cl_update_statd(0)[+210] RC=0 :cl_update_statd(0)[+212] [[ -z true ]] :cl_update_statd(0)[+212] [[ -z epprds ]] :cl_update_statd(0)[+225] : Get the interface to the twin node :cl_update_statd(0)[+227] :cl_update_statd(0)[+227] get_node_ip epprds :cl_update_statd(0)[+9] (( 1 != 1 )) :cl_update_statd(0)[+15] Twin_Name=epprds :cl_update_statd(0)[+16] NewTwin= :cl_update_statd(0)[+19] : Get the Interface details for every interface on the twin node :cl_update_statd(0)[+20] : Reject interfaces on nodes that are not public boot addresses :cl_update_statd(0)[+21] : because those are the only ones we have state information for :cl_update_statd(0)[+23] :cl_update_statd(0)[+23] cllsif -J ~ -Sw -i epprda :cl_update_statd(0)[+23] LC_ALL=C LOCAL_NETWORK_INFO=epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+25] read adapt type network net_type attrib node ip_addr skip interface skip netmask skip skip prefix ip_family :cl_update_statd(0)[+25] IFS=~ :cl_update_statd(0)[+24] cllsif -J ~ -Sw -i epprds :cl_update_statd(0)[+24] LC_ALL=C :cl_update_statd(0)[+25] [[ public != 
public ]] :cl_update_statd(0)[+25] [[ boot != boot ]] :cl_update_statd(0)[+33] : Find the state of this candidate :cl_update_statd(0)[+33] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+37] :cl_update_statd(0)[+37] tr ./ xx :cl_update_statd(0)[+37] print 61.81.244.123 addr=i61x81x244x123_epprds :cl_update_statd(0)[+43] eval candidate_state=${i61x81x244x123_epprds:-down} :cl_update_statd(0)[+43] candidate_state=UP :cl_update_statd(0)[+46] : If state is UP, check to see if this node can talk to it :cl_update_statd(0)[+46] [[ UP == UP ]] :cl_update_statd(0)[+50] ping -w 5 -c 1 -q 61.81.244.123 :cl_update_statd(0)[+50] 1> /dev/null :cl_update_statd(0)[+61] echo epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+62] read lcl_adapt lcl_type lcl_network lcl_net_type lcl_attrib lcl_node lcl_ip_addr skip lcl_interface skip lcl_netmask skip skip lcl_prefix lcl_ip_family :cl_update_statd(0)[+62] IFS=~ :cl_update_statd(0)[+61] tr \n :cl_update_statd(0)[+62] [[ net_ether_01 != net_ether_01 ]] :cl_update_statd(0)[+62] [[ boot != boot ]] :cl_update_statd(0)[+62] [[ public != public ]] :cl_update_statd(0)[+62] [[ AF_INET != AF_INET ]] :cl_update_statd(0)[+62] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+71] :cl_update_statd(0)[+71] tr ./ xx :cl_update_statd(0)[+71] print 61.81.244.134 addr=i61x81x244x134_epprda :cl_update_statd(0)[+77] eval lcl_candidate_state=${i61x81x244x134_epprda:-down} :cl_update_statd(0)[+77] lcl_candidate_state=UP :cl_update_statd(0)[+77] [[ UP == UP ]] :cl_update_statd(0)[+81] : epprds is on the same network as an interface that is up :cl_update_statd(0)[+82] : on the local node, and the attributes match. :cl_update_statd(0)[+84] NewTwin=epprds :cl_update_statd(0)[+85] break :cl_update_statd(0)[+85] [[ -n epprds ]] :cl_update_statd(0)[+91] break :cl_update_statd(0)[+91] [[ -z epprds ]] :cl_update_statd(0)[+100] echo epprds :cl_update_statd(0)[+101] return 0 NEWTWIN=epprds :cl_update_statd(0)[+227] [[ -z epprds ]] :cl_update_statd(0)[+227] [[ epprds != ]] :cl_update_statd(0)[+243] : Need to register a new twin :cl_update_statd(0)[+243] [[ -n ]] :cl_update_statd(0)[+251] : Register our new twin, epprds :cl_update_statd(0)[+253] nfso -H sm_register epprds :cl_update_statd(0)[+254] RC=0 :cl_update_statd(0)[+259] : RC is actually 0 :cl_update_statd(0)[+266] return 0 +epprd_rg:rg_move_complete[155] (( 0 != 0 )) +epprd_rg:rg_move_complete[160] break +epprd_rg:rg_move_complete[166] : Set the RESOURCE_GROUPS environment variable with the names +epprd_rg:rg_move_complete[167] : of all resource groups participating in this event, and export +epprd_rg:rg_move_complete[168] : them to all successive scripts. 
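[Annotation] The cl_update_statd trace above shows how a twin candidate is validated: the interface state exported by the cluster manager is looked up through a variable whose name encodes the IP address (dots and slashes become 'x'), and an UP candidate must also answer a ping before it is registered as the rpc.statd twin with nfso -H sm_register. A condensed sketch of that check, using the same commands the trace shows:

    # Validate a twin candidate interface, then register the twin.
    ip=61.81.244.123; node=epprds                # values from the trace
    addr=i$(print $ip | tr ./ xx)_$node          # -> i61x81x244x123_epprds
    eval candidate_state=\${$addr:-down}         # state set by the cluster manager
    if [[ $candidate_state == UP ]] && ping -w 5 -c 1 -q $ip >/dev/null
    then
        nfso -H sm_register $node                # make epprds our statd twin
    fi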
+epprd_rg:rg_move_complete[170] set -a +epprd_rg:rg_move_complete[171] clsetenvgrp epprds rg_move_complete epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprds rg_move_complete epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_complete[171] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_complete[172] RC=0 +epprd_rg:rg_move_complete[173] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_complete[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_complete[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_complete[3] HOMELESS_GROUPS='' +epprd_rg:rg_move_complete[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_complete[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_complete[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_complete[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_complete[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_complete[8] SIBLING_GROUPS='' +epprd_rg:rg_move_complete[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_complete[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_complete[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[174] set +a +epprd_rg:rg_move_complete[175] (( 0 != 0 )) +epprd_rg:rg_move_complete[182] : For each participating resource group, serially process the resources. +epprd_rg:rg_move_complete[251] (( 1 == 1 )) +epprd_rg:rg_move_complete[253] [[ REAL == EMUL ]] +epprd_rg:rg_move_complete[259] stopsrc -s rpc.lockd 0513-044 The rpc.lockd Subsystem was requested to stop. +epprd_rg:rg_move_complete[260] rcstopsrc=0 +epprd_rg:rg_move_complete[261] (( 0 != 0 )) +epprd_rg:rg_move_complete[266] (( TRY=0)) +epprd_rg:rg_move_complete[266] (( 0<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 1<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! 
-z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 2<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 3<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 4<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 5<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z '' ]] +epprd_rg:rg_move_complete[273] break +epprd_rg:rg_move_complete[277] [[ ! -z '' ]] +epprd_rg:rg_move_complete[300] : Sure that rpc.lockd stopped. Restart it. +epprd_rg:rg_move_complete[302] startsrc -s rpc.lockd 0513-059 The rpc.lockd Subsystem has been started. Subsystem PID is 26214734. +epprd_rg:rg_move_complete[303] rcstartsrc=0 +epprd_rg:rg_move_complete[304] (( 0 != 0 )) +epprd_rg:rg_move_complete[365] : If the resource group in this rg_move is now homeless, +epprd_rg:rg_move_complete[366] : then we need to put it into an error state. +epprd_rg:rg_move_complete[368] active_node=0 +epprd_rg:rg_move_complete[428] : If the resource group in this rg_move is now homeless_secondary, +epprd_rg:rg_move_complete[429] : then we need to put it into an errorsecondary state. +epprd_rg:rg_move_complete[437] : Set an error state for concurrent groups that have +epprd_rg:rg_move_complete[438] : been brought offline on this node by rg_move. 
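[Annotation] The rg_move_complete steps traced above implement a stop-wait-restart cycle for rpc.lockd: stopsrc issues the stop, the script then polls lssrc once per second (up to LIMIT=60 tries) until the state column goes empty, and startsrc finally brings the daemon back. A minimal sketch of the same pattern, assuming the standard AIX SRC commands:

    stopsrc -s rpc.lockd
    typeset -li TRY=0 LIMIT=60
    while (( TRY < LIMIT )); do
        LC_ALL=C lssrc -s rpc.lockd | tail -1 | read name subsystem pid state
        [[ -z $state ]] && break     # state column empty: daemon fully stopped
        sleep 1
        ((TRY++))
    done
    startsrc -s rpc.lockd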
+epprd_rg:rg_move_complete[453] AM_SYNC_CALLED_BY=RG_MOVE_COMPLETE +epprd_rg:rg_move_complete[453] export AM_SYNC_CALLED_BY +epprd_rg:rg_move_complete[454] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-01-28T17:11:45.884436 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=NONE :process_resources[1] JOB_TYPE=NONE :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 )) :process_resources[3342] RESOURCE_GROUPS='' :process_resources[3343] GROUPNAME='' :process_resources[3343] export GROUPNAME :process_resources[3353] IS_SERVICE_START=1 :process_resources[3354] IS_SERVICE_STOP=1 :process_resources[3360] [[ NONE == RELEASE ]] :process_resources[3360] [[ NONE == ONLINE ]] :process_resources[3729] break :process_resources[3740] : If sddsrv was turned off above, turn it back on again :process_resources[3742] [[ FALSE == TRUE ]] :process_resources[3747] exit 0 +epprd_rg:rg_move_complete[455] STATUS=0 +epprd_rg:rg_move_complete[456] : The exit status of process_resources is: 0 +epprd_rg:rg_move_complete[461] unset AM_SYNC_CALLED_BY +epprd_rg:rg_move_complete[462] [[ TRUE == TRUE ]] +epprd_rg:rg_move_complete[491] [[ -z '' ]] +epprd_rg:rg_move_complete[493] RESOURCE_GROUPS=epprd_rg +epprd_rg:rg_move_complete[499] GROUPNAME=epprd_rg +epprd_rg:rg_move_complete[499] export GROUPNAME +epprd_rg:rg_move_complete[501] cl_rrmethods2call postrg_move +epprd_rg:cl_rrmethods2call[56] version=%I% +epprd_rg:cl_rrmethods2call[84] RRMETHODS='' +epprd_rg:cl_rrmethods2call[85] NEED_RR_ENV_VARS=no +epprd_rg:cl_rrmethods2call[124] NEED_RR_ENV_VARS=yes +epprd_rg:cl_rrmethods2call[129] : Set the '*_REP_RESOURCE' variables if needed. 
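[Annotation] Both process_resources passes in this event show the same dispatch loop: each iteration runs clRGPA, eval-s the variable assignments it emits, and branches on JOB_TYPE until NONE ends the loop. A schematic sketch of that loop as it behaves in this trace:

    while true; do
        set -a                 # auto-export everything the eval assigns
        eval "$(clRGPA)"       # e.g. JOB_TYPE=NONE, or JOB_TYPE=RELEASE ...
        set +a
        [[ $JOB_TYPE == NONE ]] && break
        # dispatch on JOB_TYPE here (RELEASE, APPLICATIONS, ONLINE, ...)
    done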
+epprd_rg:cl_rrmethods2call[131] [[ yes == yes ]] +epprd_rg:cl_rrmethods2call[133] cllsres +epprd_rg:cl_rrmethods2call[133] 2> /dev/null +epprd_rg:cl_rrmethods2call[133] eval APPLICATIONS='"epprd_app"' EXPORT_FILESYSTEM='"/board_org"' FILESYSTEM='""' FORCED_VARYON='"false"' FSCHECK_TOOL='"fsck"' FS_BEFORE_IPADDR='"false"' MOUNT_FILESYSTEM='"/board;/board_org"' RECOVERY_METHOD='"sequential"' SERVICE_LABEL='"epprd"' SSA_DISK_FENCING='"false"' VG_AUTO_IMPORT='"false"' VOLUME_GROUP='"datavg"' USERDEFINED_RESOURCES='""' +epprd_rg:cl_rrmethods2call[1] APPLICATIONS=epprd_app +epprd_rg:cl_rrmethods2call[1] EXPORT_FILESYSTEM=/board_org +epprd_rg:cl_rrmethods2call[1] FILESYSTEM='' +epprd_rg:cl_rrmethods2call[1] FORCED_VARYON=false +epprd_rg:cl_rrmethods2call[1] FSCHECK_TOOL=fsck +epprd_rg:cl_rrmethods2call[1] FS_BEFORE_IPADDR=false +epprd_rg:cl_rrmethods2call[1] MOUNT_FILESYSTEM='/board;/board_org' +epprd_rg:cl_rrmethods2call[1] RECOVERY_METHOD=sequential +epprd_rg:cl_rrmethods2call[1] SERVICE_LABEL=epprd +epprd_rg:cl_rrmethods2call[1] SSA_DISK_FENCING=false +epprd_rg:cl_rrmethods2call[1] VG_AUTO_IMPORT=false +epprd_rg:cl_rrmethods2call[1] VOLUME_GROUP=datavg +epprd_rg:cl_rrmethods2call[1] USERDEFINED_RESOURCES='' +epprd_rg:cl_rrmethods2call[137] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[142] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[147] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[152] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[157] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[162] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[167] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[172] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[182] [[ -z '' ]] +epprd_rg:cl_rrmethods2call[184] typeset sysmgdata +epprd_rg:cl_rrmethods2call[185] typeset reposmgdata +epprd_rg:cl_rrmethods2call[186] [[ -x /usr/es/sbin/cluster/xd_generic/xd_cli/clxd_list_mg_smit ]] +epprd_rg:cl_rrmethods2call[191] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[191] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[197] echo '' +epprd_rg:cl_rrmethods2call[199] return 0 +epprd_rg:rg_move_complete[501] METHODS='' +epprd_rg:rg_move_complete[516] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. 
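[Annotation] cl_rrmethods2call above loads the group's resource definitions by eval-ing cllsres output, which prints NAME="value" assignments (APPLICATIONS, VOLUME_GROUP, SERVICE_LABEL, and the rest). The same pattern in isolation:

    # Pull the resource definitions for the current group into the
    # environment; stderr is discarded exactly as in the trace.
    eval "$(cllsres 2>/dev/null)"
    print -- "$VOLUME_GROUP"     # datavg in this trace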
+epprd_rg:rg_move_complete[518] exit 0 Jan 28 2023 17:11:45 EVENT COMPLETED: rg_move_complete epprds 1 0 |2023-01-28T17:11:45|22167|EVENT COMPLETED: rg_move_complete epprds 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T17:11:46.003084 + echo '|2023-01-28T17:11:46.003084|INFO: rg_move_complete|epprd_rg|epprds|1|0' + 1>> /var/hacmp/availability/clavailability.log PowerHA SystemMirror Event Summary ---------------------------------------------------------------------------- Serial number for this event: 22167 Event: TE_RG_MOVE_ACQUIRE Start time: Sat Jan 28 17:11:39 2023 End time: Sat Jan 28 17:11:47 2023 Action: Resource: Script Name: ---------------------------------------------------------------------------- No resources changed as a result of this event ---------------------------------------------------------------------------- |EVENT_SUMMARY_START|TE_RG_MOVE_ACQUIRE|2023-01-28T17:11:39|2023-01-28T17:11:47|22167| |EVENT_NO_ACTION| |EVENT_SUMMARY_END| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 22167 No resource state change initiated by the cluster manager as a result of this event ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_JOIN_NODE_DEP_COMPLETE|2023-01-28T17:11:49|22167| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Jan 28 2023 17:11:49 EVENT START: node_up_complete epprds |2023-01-28T17:11:49|22167|EVENT START: node_up_complete epprds| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T17:11:49.485303 + echo '|2023-01-28T17:11:49.485303|INFO: node_up_complete|epprds' + 1>> /var/hacmp/availability/clavailability.log + version=%I% + set -a + cllsparam -n epprda + eval NODE_NAME=epprda VERBOSE_LOGGING=high PS4=$'\'${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO]' $'\'' DEBUG_LEVEL=Standard LC_ALL=$'\'C\'' + NODE_NAME=epprda + VERBOSE_LOGGING=high :node_up_complete[1] PS4='${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] ' :node_up_complete[1] DEBUG_LEVEL=Standard :node_up_complete[1] LC_ALL=C :node_up_complete[80] set +a :node_up_complete[82] NODENAME=epprds :node_up_complete[83] RC=0 :node_up_complete[83] typeset -i RC :node_up_complete[84] UPDATESTATD=0 :node_up_complete[84] typeset -i UPDATESTATD :node_up_complete[86] LPM_IN_PROGRESS_DIR=/var/hacmp/.lpm_in_progress :node_up_complete[86] typeset LPM_IN_PROGRESS_DIR :node_up_complete[87] LPM_IN_PROGRESS_PREFIX=lpm :node_up_complete[87] typeset LPM_IN_PROGRESS_PREFIX :node_up_complete[88] STATE_FILE=/var/hacmp/cl_dr.state :node_up_complete[88] typeset STATE_FILE :node_up_complete[97] STATUS=0 :node_up_complete[99] set -u :node_up_complete[101] (( 1 < 1 )) :node_up_complete[107] START_MODE='' :node_up_complete[107] typeset START_MODE :node_up_complete[108] (( 1 > 1 )) :node_up_complete[114] : serial number for this event is 22167 :node_up_complete[118] RPCLOCKDSTOPPED=0 :node_up_complete[118] typeset -i RPCLOCKDSTOPPED :node_up_complete[119] [[ -f /tmp/.RPCLOCKDSTOPPED ]] :node_up_complete[127] clnodename :node_up_complete[127] wc -l :node_up_complete[127] (( 2 == 2 )) :node_up_complete[129] clodmget -f group -n HACMPgroup :node_up_complete[129] RESOURCE_GROUPS=epprd_rg 
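[Annotation] node_up_complete repeats the two-node NFS test seen earlier in rg_move_complete: rpc.statd twin maintenance is only attempted when the cluster has exactly two nodes and a resource group exports filesystems. A condensed sketch (RG stands in for the group name being checked and is not a variable from the trace):

    if (( $(clnodename | wc -l) == 2 )); then
        EXPORTLIST=$(clodmget -q "group=$RG AND name=EXPORT_FILESYSTEM" \
            -f value -n HACMPresource)
        [[ -n $EXPORTLIST ]] && UPDATESTATD=1
    fi
    (( UPDATESTATD )) && cl_update_statd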
:node_up_complete[132] clodmget -q group='epprd_rg AND name=EXPORT_FILESYSTEM' -f value -n HACMPresource :node_up_complete[132] EXPORTLIST=/board_org :node_up_complete[133] [[ -n /board_org ]] :node_up_complete[135] UPDATESTATD=1 :node_up_complete[136] [[ epprds == epprda ]] :node_up_complete[146] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+189] TWIN_NAME=epprds :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+196] rpcinfo -p :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw status :cl_update_statd(0)[+207] : Get the current twin, if there is one :cl_update_statd(0)[+209] :cl_update_statd(0)[+209] nfso -H sm_gethost :cl_update_statd(0)[+209] 2>& 1 CURTWIN=epprds :cl_update_statd(0)[+210] RC=0 :cl_update_statd(0)[+212] [[ -z true ]] :cl_update_statd(0)[+212] [[ -z epprds ]] :cl_update_statd(0)[+225] : Get the interface to the twin node :cl_update_statd(0)[+227] :cl_update_statd(0)[+227] get_node_ip epprds :cl_update_statd(0)[+9] (( 1 != 1 )) :cl_update_statd(0)[+15] Twin_Name=epprds :cl_update_statd(0)[+16] NewTwin= :cl_update_statd(0)[+19] : Get the Interface details for every interface on the twin node :cl_update_statd(0)[+20] : Reject interfaces on nodes that are not public boot addresses :cl_update_statd(0)[+21] : because those are the only ones we have state information for :cl_update_statd(0)[+23] :cl_update_statd(0)[+23] cllsif -J ~ -Sw -i epprda :cl_update_statd(0)[+23] LC_ALL=C LOCAL_NETWORK_INFO=epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+24] cllsif -J ~ -Sw -i epprds :cl_update_statd(0)[+24] LC_ALL=C :cl_update_statd(0)[+25] read adapt type network net_type attrib node ip_addr skip interface skip netmask skip skip prefix ip_family :cl_update_statd(0)[+25] IFS=~ :cl_update_statd(0)[+25] [[ public != public ]] :cl_update_statd(0)[+25] [[ boot != boot ]] :cl_update_statd(0)[+33] : Find the state of this candidate :cl_update_statd(0)[+33] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+37] :cl_update_statd(0)[+37] print 61.81.244.123 :cl_update_statd(0)[+37] tr ./ xx addr=i61x81x244x123_epprds :cl_update_statd(0)[+43] eval candidate_state=${i61x81x244x123_epprds:-down} :cl_update_statd(0)[+43] candidate_state=UP :cl_update_statd(0)[+46] : If state is UP, check to see if this node can talk to it :cl_update_statd(0)[+46] [[ UP == UP ]] :cl_update_statd(0)[+50] ping -w 5 -c 1 -q 61.81.244.123 :cl_update_statd(0)[+50] 1> /dev/null :cl_update_statd(0)[+61] echo epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+61] tr \n :cl_update_statd(0)[+62] read lcl_adapt lcl_type lcl_network lcl_net_type lcl_attrib lcl_node lcl_ip_addr skip lcl_interface skip lcl_netmask skip skip lcl_prefix lcl_ip_family :cl_update_statd(0)[+62] IFS=~ :cl_update_statd(0)[+62] [[ 
net_ether_01 != net_ether_01 ]] :cl_update_statd(0)[+62] [[ boot != boot ]] :cl_update_statd(0)[+62] [[ public != public ]] :cl_update_statd(0)[+62] [[ AF_INET != AF_INET ]] :cl_update_statd(0)[+62] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+71] :cl_update_statd(0)[+71] print 61.81.244.134 :cl_update_statd(0)[+71] tr ./ xx addr=i61x81x244x134_epprda :cl_update_statd(0)[+77] eval lcl_candidate_state=${i61x81x244x134_epprda:-down} :cl_update_statd(0)[+77] lcl_candidate_state=UP :cl_update_statd(0)[+77] [[ UP == UP ]] :cl_update_statd(0)[+81] : epprds is on the same network as an interface that is up :cl_update_statd(0)[+82] : on the local node, and the attributes match. :cl_update_statd(0)[+84] NewTwin=epprds :cl_update_statd(0)[+85] break :cl_update_statd(0)[+85] [[ -n epprds ]] :cl_update_statd(0)[+91] break :cl_update_statd(0)[+91] [[ -z epprds ]] :cl_update_statd(0)[+100] echo epprds :cl_update_statd(0)[+101] return 0 NEWTWIN=epprds :cl_update_statd(0)[+227] [[ -z epprds ]] :cl_update_statd(0)[+227] [[ epprds != epprds ]] :cl_update_statd(0)[+259] : RC is actually 0 :cl_update_statd(0)[+266] return 0 :node_up_complete[147] (( 0 )) :node_up_complete[151] break :node_up_complete[156] (( 1 )) :node_up_complete[158] (( 0 )) :node_up_complete[198] [[ TRUE == FALSE ]] :node_up_complete[268] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. :node_up_complete[270] : This is the final clRGinfo output :node_up_complete[272] clRGinfo -p -t :node_up_complete[272] 2>& 1 clRGinfo[431]: version I clRGinfo[517]: Number of resource groups = 0 clRGinfo[562]: cluster epprda_cluster is version = 22 clRGinfo[597]: no resource groups specified on command line - print all clRGinfo[685]: Current group is 'epprd_rg' get primary state info for state 6 get secondary state info for state 6 getPrimaryStateStr: using primary_table => primary_state_table get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_table Cluster Name: epprda_cluster Resource Group Name: epprd_rg Node Group State Delayed Timers ---------------------------------------------------------------- --------------- ------------------- epprda ONLINE epprds OFFLINE :node_up_complete[277] (( 0 == 0 )) :node_up_complete[279] [[ epprds != epprda ]] :node_up_complete[281] grep -w In_progress_file /var/hacmp/cl_dr.state :node_up_complete[281] 2> /dev/null :node_up_complete[281] cut -d= -f2 :node_up_complete[281] lpm_in_progress_file='' :node_up_complete[282] ls '/var/hacmp/.lpm_in_progress/lpm_*' :node_up_complete[282] 2> /dev/null :node_up_complete[282] lpm_in_progress_prefix='' :node_up_complete[283] [[ -n '' ]] :node_up_complete[300] exit 0 Jan 28 2023 17:11:49 EVENT COMPLETED: node_up_complete epprds 0 |2023-01-28T17:11:49|22167|EVENT COMPLETED: node_up_complete epprds 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T17:11:49.707442 + echo '|2023-01-28T17:11:49.707442|INFO: node_up_complete|epprds|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 17:59:58 EVENT START: admin_op clrm_stop_request 22168 0 |2023-01-28T17:59:58|22168|EVENT START: admin_op clrm_stop_request 22168 0| :admin_op[110] trap sigint_handler INT :admin_op[116] OP_TYPE=clrm_stop_request :admin_op[116] typeset OP_TYPE :admin_op[117] SERIAL=22168 :admin_op[117] typeset -li SERIAL :admin_op[118] INVALID=0 :admin_op[118] typeset -li INVALID The administrator initiated the following action at Sat Jan 28 17:59:58 KORST 2023 Check 
smit.log and clutils.log for additional details. Stopping PowerHA cluster services on node: epprda in graceful mode... Jan 28 2023 17:59:58 EVENT COMPLETED: admin_op clrm_stop_request 22168 0 0 |2023-01-28T17:59:58|22168|EVENT COMPLETED: admin_op clrm_stop_request 22168 0 0| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 22168 Stop cluster services request with 'Graceful' option received for 'epprda'. Enqueued rg_move release event for resource group epprd_rg. Node Down Completion Event has been enqueued. ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_FAIL_NODE_DEP|2023-01-28T17:59:58|22168| |STOP_CLUSTER_SERVICES|Graceful|epprda| |CLUSTER_RG_MOVE_RELEASE|epprd_rg| |NODE_DOWN_COMPLETE| |EVENT_PREAMBLE_END| Jan 28 2023 17:59:59 EVENT START: node_down epprda graceful |2023-01-28T17:59:59|22168|EVENT START: node_down epprda graceful| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T17:59:59.601396 + echo '|2023-01-28T17:59:59.601396|INFO: node_down|epprda|graceful' + 1>> /var/hacmp/availability/clavailability.log :node_down[64] version=%I% :node_down[67] NODENAME=epprda :node_down[67] export NODENAME :node_down[68] PARAM=graceful :node_down[68] export PARAM :node_down[75] STATUS=0 :node_down[75] typeset -li STATUS :node_down[77] AIX_SHUTDOWN=false :node_down[79] set -u :node_down[81] (( 2 < 1 )) :node_down[87] : serial number for this event is 22168 :node_down[91] : Clean up NFS state tracking :node_down[93] UPDATESTATDFILE=/usr/es/sbin/cluster/etc/updatestatd :node_down[94] rm -f /tmp/.RPCLOCKDSTOPPED :node_down[95] rm -f /usr/es/sbin/cluster/etc/updatestatd :node_down[96] UPDATESTATD=0 :node_down[97] export UPDATESTATD :node_down[100] : For RAS debugging, the result of ps -edf is captured at this time :node_down[102] : begin ps -edf :node_down[103] ps -edf UID PID PPID C STIME TTY TIME CMD root 1 0 0 Nov 16 - 0:01 /etc/init root 4260170 6095340 0 Nov 16 - 0:00 /usr/sbin/syslogd root 5046714 1 0 Nov 16 - 0:00 /usr/ccs/bin/shlap64 root 5177846 1 0 Nov 16 - 17:12 /usr/sbin/syncd 60 root 5898680 1 0 Nov 16 - 0:00 /usr/dt/bin/dtlogin -daemon root 5964246 1 0 Nov 16 - 0:00 /usr/lib/errdemon root 6029768 6095340 0 16:38:31 - 0:00 /usr/sbin/snmpd root 6095340 1 0 Nov 16 - 0:00 /usr/sbin/srcmstr root 6226176 6095340 0 Nov 16 - 0:00 /usr/sbin/inetd root 6357492 6095340 0 Nov 16 - 0:00 /usr/sbin/portmap root 6488536 6095340 0 Nov 16 - 0:56 /usr/sbin/xntpd -x root 6816230 6095340 0 Nov 16 - 0:04 /usr/sbin/hostmibd root 6881760 6095340 0 Nov 16 - 0:23 sendmail: accepting connections root 6947294 6095340 0 Nov 16 - 0:04 /usr/sbin/snmpmibd root 7143710 6095340 0 Nov 16 - 0:22 /usr/sbin/aixmibd root 7668214 1 0 Nov 16 - 0:11 /usr/sbin/cron root 7799282 6095340 0 Nov 16 - 1:12 /usr/sbin/aso daemon 7864678 6095340 0 17:10:55 - 0:00 /usr/sbin/rpc.statd -d 0 -t 50 root 7930136 6095340 0 Nov 16 - 0:00 /usr/sbin/qdaemon root 8061186 6095340 0 Nov 16 - 0:00 /usr/sbin/biod 6 root 8126748 1 0 Nov 16 - 0:00 /usr/sbin/uprintfd root 8257896 6095340 0 17:10:51 - 0:00 /usr/sbin/rpc.mountd root 8520102 6095340 0 Nov 16 - 0:00 /usr/sbin/writesrv root 8585542 6095340 0 Nov 16 - 0:00 /usr/sbin/sshd root 8913186 6095340 0 Nov 16 - 0:00 /usr/sbin/pfcdaemon root 13959478 6095340 0 17:00:52 - 0:00 /opt/rsct/bin/rmcd -a IBM.LPCommands -r -S 1500 root 14025136 6095340 0 Nov 16 - 0:00 /usr/sbin/lldpd root 14090674 6095340 0 Nov 16 - 0:00 /usr/sbin/ecpvdpd 
root 14287294 1 0 Nov 16 - 1:26 /usr/bin/topasrec -L -s 300 -R 1 -r 6 -o /var/perf/daily/ -ypersistent=1 -O type=bin -ystart_time=15:11:38,Nov16,2022 root 14352890 6095340 0 Nov 16 - 0:04 /opt/rsct/bin/IBM.MgmtDomainRMd root 14614984 1 0 15:01:09 - 0:00 /usr/sbin/getty /dev/console root 14877148 6095340 0 Nov 16 - 0:00 /var/perf/pm/bin/pmperfrec root 15008234 6095340 0 Nov 16 - 0:00 /opt/rsct/bin/IBM.HostRMd root 15073556 6095340 0 Nov 16 - 0:00 /opt/rsct/bin/IBM.ServiceRMd root 15532528 6095340 0 Nov 16 - 0:00 /opt/rsct/bin/IBM.DRMd root 18088406 8585542 0 16:32:54 - 0:00 sshd: root@pts/5 root 18612628 8585542 0 17:57:26 - 0:00 sshd: root@pts/6 root 18743778 8585542 0 15:03:01 - 0:00 sshd: root@pts/2 root 20251054 22020420 0 16:48:17 pts/4 0:00 -ksh root 20447554 8585542 0 16:41:04 - 0:00 sshd: root@pts/7 root 20513024 20447554 0 16:41:07 pts/7 0:00 -ksh root 20709790 18088406 0 16:32:54 pts/5 0:00 -ksh root 20972018 6095340 0 17:07:08 - 0:00 /opt/rsct/bin/IBM.ConfigRMd root 21561614 26411472 0 17:54:30 pts/3 0:00 smitty mknfsexp root 21823786 18612628 0 17:57:27 pts/6 0:00 -ksh root 22020420 8585542 0 16:48:14 - 0:00 sshd: root@pts/4 root 22086068 6095340 0 16:40:09 - 0:00 /usr/es/sbin/cluster/clstrmgr root 22217052 18743778 0 15:03:01 pts/2 0:00 -ksh root 22610296 6095340 0 17:09:04 - 0:00 /opt/rsct/bin/IBM.StorageRMd root 22872426 6095340 0 17:09:38 - 0:00 /usr/sbin/clcomd -d -g root 23003588 1 0 00:00:00 - 0:00 /usr/bin/topas_nmon -f -d -t -s 300 -c 288 -youtput_dir=/ptf/nmon/epprda -ystart_time=00:00:00,Jan28,2023 root 23462322 22086068 0 17:10:27 - 0:00 run_rcovcmd root 25166118 25297194 0 17:40:42 pts/1 0:00 -ksh root 25297194 8585542 0 17:40:41 - 0:00 sshd: root@pts/1 root 25362756 23462322 4 17:59:59 - 0:00 /usr/es/sbin/cluster/events/cmd/clcallev node_down epprda graceful root 26018206 28049702 0 17:59:59 - 0:00 ps -edf root 26214734 6095340 0 17:11:45 - 0:00 /usr/sbin/rpc.lockd -d 0 root 26411472 27853206 0 17:50:59 pts/3 0:00 -ksh root 26804518 1 0 0:00 root 26935622 28246396 0 17:10:28 - 0:00 /usr/sbin/gsclvmd -r 30 -i 300 -t 300 -c 00c44af100004b00000001851e9dc053 -v 0 root 27394550 28311894 0 17:32:07 pts/0 0:00 -ksh root 27853206 8585542 0 17:50:59 - 0:00 sshd: root@pts/3 root 28049702 25362756 0 17:59:59 - 0:00 /bin/ksh93 /usr/es/sbin/cluster/events/node_down epprda graceful root 28180804 13959478 0 17:00:52 - 0:00 [trspoolm] root 28246396 6095340 0 17:10:21 - 0:00 /usr/sbin/gsclvmd root 28311894 8585542 0 17:32:06 - 0:00 sshd: root@pts/0 root 28377402 6095340 0 17:10:46 - 0:00 /usr/sbin/nfsd 3891 root 28770708 6095340 0 17:08:42 - 0:00 /usr/sbin/clconfd root 28901860 20513024 1 17:59:53 pts/7 0:00 smitty clstop root 29163932 6095340 0 17:08:43 - 0:00 /usr/sbin/rsct/bin/hagsd cthags :node_down[104] : end ps -edf :node_down[107] : If RG_DEPENDENCIES is not false, all RG actions are taken via rg_move events. 
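[Annotation] With RG dependencies in effect, node_down takes no resource group actions itself; for the local node it only checks for volume groups left varied on with no group needing them. The trace that follows runs lsvg -L, lsvg -L -o, paste and grep in sequence; a plausible composition of those steps (the nesting is inferred, not shown verbatim in the trace) is:

    # VGs known to this node minus the ones currently online: the online
    # list is pasted into a grep alternation such as
    # 'datavg|caavg_private|rootvg' and filtered out of the full list.
    INACTIVE_VGS=$(lsvg -L | grep -w -v -x -E "$(lsvg -L -o | paste -s -d'|' -)")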
:node_down[109] [[ graceful != forced ]] :node_down[109] [[ TRUE == FALSE ]] :node_down[207] : Processing specific to the local node :node_down[209] [[ epprda == epprda ]] :node_down[212] : Stopping cluster services on epprda with the graceful option :node_down[214] [[ graceful != forced ]] :node_down[219] lsvg -L :node_down[219] lsvg -L -o :node_down[219] paste -s '-d|' - :node_down[219] grep -w -v -x -E 'datavg|caavg_private|rootvg' :node_down[219] INACTIVE_VGS='' :node_down[222] [[ -n '' ]] :node_down[272] unset PS4_LOOP :node_down[276] : update the location DB to indicate this node is going down :node_down[278] clchdaemons -r -d clstrmgr_scripts -t resource_locator :node_down[296] [[ -n false ]] :node_down[296] [[ false == true ]] :node_down[305] exit 0 Jan 28 2023 17:59:59 EVENT COMPLETED: node_down epprda graceful 0 |2023-01-28T17:59:59|22168|EVENT COMPLETED: node_down epprda graceful 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T17:59:59.748499 + echo '|2023-01-28T17:59:59.748499|INFO: node_down|epprda|graceful|0' + 1>> /var/hacmp/availability/clavailability.log PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 22171 Stop cluster services request with 'Graceful' option received for 'epprds'. Enqueued rg_move release event for resource group epprd_rg. Node Down Completion Event has been enqueued. ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_FAIL_NODE_DEP|2023-01-28T18:00:02|22171| |STOP_CLUSTER_SERVICES|Graceful|epprds| |CLUSTER_RG_MOVE_RELEASE|epprd_rg| |NODE_DOWN_COMPLETE| |EVENT_PREAMBLE_END| Jan 28 2023 18:00:03 EVENT START: node_down epprds graceful |2023-01-28T18:00:03|22171|EVENT START: node_down epprds graceful| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:00:03.454439 + echo '|2023-01-28T18:00:03.454439|INFO: node_down|epprds|graceful' + 1>> /var/hacmp/availability/clavailability.log :node_down[64] version=%I% :node_down[67] NODENAME=epprds :node_down[67] export NODENAME :node_down[68] PARAM=graceful :node_down[68] export PARAM :node_down[75] STATUS=0 :node_down[75] typeset -li STATUS :node_down[77] AIX_SHUTDOWN=false :node_down[79] set -u :node_down[81] (( 2 < 1 )) :node_down[87] : serial number for this event is 22171 :node_down[91] : Clean up NFS state tracking :node_down[93] UPDATESTATDFILE=/usr/es/sbin/cluster/etc/updatestatd :node_down[94] rm -f /tmp/.RPCLOCKDSTOPPED :node_down[95] rm -f /usr/es/sbin/cluster/etc/updatestatd :node_down[96] UPDATESTATD=0 :node_down[97] export UPDATESTATD :node_down[100] : For RAS debugging, the result of ps -edf is captured at this time :node_down[102] : begin ps -edf :node_down[103] ps -edf UID PID PPID C STIME TTY TIME CMD root 1 0 0 Nov 16 - 0:01 /etc/init root 4260170 6095340 0 Nov 16 - 0:00 /usr/sbin/syslogd root 5046714 1 0 Nov 16 - 0:00 /usr/ccs/bin/shlap64 root 5177846 1 0 Nov 16 - 17:12 /usr/sbin/syncd 60 root 5898680 1 0 Nov 16 - 0:00 /usr/dt/bin/dtlogin -daemon root 5964246 1 0 Nov 16 - 0:00 /usr/lib/errdemon root 6029768 6095340 0 16:38:31 - 0:00 /usr/sbin/snmpd root 6095340 1 0 Nov 16 - 0:00 /usr/sbin/srcmstr root 6226176 6095340 0 Nov 16 - 0:00 /usr/sbin/inetd root 6357492 6095340 0 Nov 16 - 0:00 /usr/sbin/portmap root 6488536 6095340 0 Nov 16 - 0:56 /usr/sbin/xntpd -x root 6816230 6095340 0 Nov 16 - 0:04 /usr/sbin/hostmibd root 6881760 6095340 0 Nov 16 - 0:23 sendmail: accepting connections root 6947294 
6095340 0 Nov 16 - 0:04 /usr/sbin/snmpmibd root 7143710 6095340 0 Nov 16 - 0:22 /usr/sbin/aixmibd root 7668214 1 0 Nov 16 - 0:11 /usr/sbin/cron root 7799282 6095340 0 Nov 16 - 1:12 /usr/sbin/aso daemon 7864678 6095340 0 17:10:55 - 0:00 /usr/sbin/rpc.statd -d 0 -t 50 root 7930136 6095340 0 Nov 16 - 0:00 /usr/sbin/qdaemon root 8061186 6095340 0 Nov 16 - 0:00 /usr/sbin/biod 6 root 8126748 1 0 Nov 16 - 0:00 /usr/sbin/uprintfd root 8257896 6095340 0 17:10:51 - 0:00 /usr/sbin/rpc.mountd root 8520102 6095340 0 Nov 16 - 0:00 /usr/sbin/writesrv root 8585542 6095340 0 Nov 16 - 0:00 /usr/sbin/sshd root 8913186 6095340 0 Nov 16 - 0:00 /usr/sbin/pfcdaemon root 13959478 6095340 0 17:00:52 - 0:00 /opt/rsct/bin/rmcd -a IBM.LPCommands -r -S 1500 root 14025136 6095340 0 Nov 16 - 0:00 /usr/sbin/lldpd root 14090674 6095340 0 Nov 16 - 0:00 /usr/sbin/ecpvdpd root 14287294 1 0 Nov 16 - 1:26 /usr/bin/topasrec -L -s 300 -R 1 -r 6 -o /var/perf/daily/ -ypersistent=1 -O type=bin -ystart_time=15:11:38,Nov16,2022 root 14352890 6095340 0 Nov 16 - 0:04 /opt/rsct/bin/IBM.MgmtDomainRMd root 14614984 1 0 15:01:09 - 0:00 /usr/sbin/getty /dev/console root 14877148 6095340 0 Nov 16 - 0:00 /var/perf/pm/bin/pmperfrec root 15008234 6095340 0 Nov 16 - 0:00 /opt/rsct/bin/IBM.HostRMd root 15073556 6095340 0 Nov 16 - 0:00 /opt/rsct/bin/IBM.ServiceRMd root 15532528 6095340 0 Nov 16 - 0:00 /opt/rsct/bin/IBM.DRMd root 18088406 8585542 0 16:32:54 - 0:00 sshd: root@pts/5 root 18612628 8585542 0 17:57:26 - 0:00 sshd: root@pts/6 root 18743778 8585542 0 15:03:01 - 0:00 sshd: root@pts/2 root 20251054 22020420 0 16:48:17 pts/4 0:00 -ksh root 20447554 8585542 0 16:41:04 - 0:00 sshd: root@pts/7 root 20513024 20447554 0 16:41:07 pts/7 0:00 -ksh root 20709790 18088406 0 16:32:54 pts/5 0:00 -ksh root 20972018 6095340 0 17:07:08 - 0:00 /opt/rsct/bin/IBM.ConfigRMd root 21561614 26411472 0 17:54:30 pts/3 0:00 smitty mknfsexp root 21823786 18612628 0 17:57:27 pts/6 0:00 -ksh root 22020420 8585542 0 16:48:14 - 0:00 sshd: root@pts/4 root 22086068 6095340 0 16:40:09 - 0:00 /usr/es/sbin/cluster/clstrmgr root 22217052 18743778 0 15:03:01 pts/2 0:00 -ksh root 22610296 6095340 0 17:09:04 - 0:00 /opt/rsct/bin/IBM.StorageRMd root 22872426 6095340 0 17:09:38 - 0:00 /usr/sbin/clcomd -d -g root 23003588 1 0 00:00:00 - 0:00 /usr/bin/topas_nmon -f -d -t -s 300 -c 288 -youtput_dir=/ptf/nmon/epprda -ystart_time=00:00:00,Jan28,2023 root 23462322 22086068 0 17:10:27 - 0:00 run_rcovcmd root 25166118 25297194 0 17:40:42 pts/1 0:00 -ksh root 25297194 8585542 0 17:40:41 - 0:00 sshd: root@pts/1 root 26214734 6095340 0 17:11:45 - 0:00 /usr/sbin/rpc.lockd -d 0 root 26411472 27853206 0 17:50:59 pts/3 0:00 -ksh root 26804570 28049712 0 18:00:03 - 0:00 ps -edf root 26935622 28246396 0 17:10:28 - 0:00 /usr/sbin/gsclvmd -r 30 -i 300 -t 300 -c 00c44af100004b00000001851e9dc053 -v 0 root 27394550 28311894 0 17:32:07 pts/0 0:00 -ksh root 27853206 8585542 0 17:50:59 - 0:00 sshd: root@pts/3 root 28049712 28901864 0 18:00:03 - 0:00 /bin/ksh93 /usr/es/sbin/cluster/events/node_down epprds graceful root 28180804 13959478 0 17:00:52 - 0:00 [trspoolm] root 28246396 6095340 0 17:10:21 - 0:00 /usr/sbin/gsclvmd root 28311894 8585542 0 17:32:06 - 0:00 sshd: root@pts/0 root 28377402 6095340 0 17:10:46 - 0:00 /usr/sbin/nfsd 3891 root 28770708 6095340 0 17:08:42 - 0:00 /usr/sbin/clconfd root 28901864 23462322 3 18:00:03 - 0:00 /usr/es/sbin/cluster/events/cmd/clcallev node_down epprds graceful root 29163932 6095340 0 17:08:43 - 0:00 /usr/sbin/rsct/bin/hagsd cthags :node_down[104] : end ps -edf 
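[Annotation] Because this node_down names the remote node, the processing that follows is fencing rather than release: cl_fence_vg snapshots the disk configuration with lspv, then looks for concurrent resource groups, startup preference OAAN (Online on All Available Nodes), whose CRITICAL volume groups this node shares with the failed node. The group query it issues, as seen in the trace:

    # Concurrent groups are the candidates for CRITICAL VG fencing.
    clodmget -q 'startup_pref = OAAN' -f group -n HACMPgroup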
:node_down[107] : If RG_DEPENDENCIES is not false, all RG actions are taken via rg_move events. :node_down[109] [[ graceful != forced ]] :node_down[109] [[ TRUE == FALSE ]] :node_down[207] : Processing specific to the local node :node_down[209] [[ epprds == epprda ]] :node_down[284] : epprds, is not the local node, handle fencing for any VGs marked as $'\'CRITICAL\'.' :node_down[286] cl_fence_vg epprds :cl_fence_vg[336] version=%I% :cl_fence_vg[341] : Collect list of disks, for use later :cl_fence_vg[343] lspv :cl_fence_vg[343] lspv_out=$'hdisk0 00c44af155592938 rootvg active \nhdisk1 00c44af11e9e1645 caavg_private active \nhdisk2 00c44af11e8a9c69 datavg concurrent \nhdisk3 00c44af11e8a9cd7 datavg concurrent \nhdisk4 00c44af11e8a9d3c datavg concurrent \nhdisk5 00c44af11e8a9c05 datavg concurrent \nhdisk6 00c44af11e8a9e05 datavg concurrent \nhdisk7 00c44af11e8a9d9f datavg concurrent \nhdisk8 00c44af11e8a9e69 datavg concurrent ' :cl_fence_vg[345] [[ -z epprda ]] :cl_fence_vg[354] : Accept a formal parameter of 'name of node that failed' if none were set :cl_fence_vg[355] : in the environment :cl_fence_vg[357] EVENTNODE=epprds :cl_fence_vg[359] [[ -z epprds ]] :cl_fence_vg[368] : An explicit volume group list can be passed after the name of :cl_fence_vg[369] : the node that failed. Pick up any such :cl_fence_vg[371] shift :cl_fence_vg[372] vg_list='' :cl_fence_vg[374] common_groups='' :cl_fence_vg[375] common_critical_vgs='' :cl_fence_vg[377] [[ -z '' ]] :cl_fence_vg[380] : Find all the concurrent resource groups that contain both epprds and epprda :cl_fence_vg[382] clodmget -q 'startup_pref = OAAN' -f group -n HACMPgroup :cl_fence_vg[424] : Look at each of the resource groups in turn to determine what CRITICAL :cl_fence_vg[425] : volume groups the local node epprda share access with epprds :cl_fence_vg[443] : Process the list of common volume groups, :node_down[296] [[ -n false ]] :node_down[296] [[ false == true ]] :node_down[305] exit 0 Jan 28 2023 18:00:03 EVENT COMPLETED: node_down epprds graceful 0 |2023-01-28T18:00:03|22171|EVENT COMPLETED: node_down epprds graceful 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:00:03.600203 + echo '|2023-01-28T18:00:03.600203|INFO: node_down|epprds|graceful|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 18:00:05 EVENT START: rg_move_release epprda 1 |2023-01-28T18:00:05|22169|EVENT START: rg_move_release epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:00:05.820719 + echo '|2023-01-28T18:00:05.820719|INFO: rg_move_release|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_release[+54] [[ high = high ]] :rg_move_release[+54] version=1.6 :rg_move_release[+56] set -u :rg_move_release[+58] [ 2 != 2 ] :rg_move_release[+64] set +u :rg_move_release[+66] clcallev rg_move epprda 1 RELEASE Jan 28 2023 18:00:05 EVENT START: rg_move epprda 1 RELEASE |2023-01-28T18:00:05|22169|EVENT START: rg_move epprda 1 RELEASE| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T18:00:05.946742 :clevlog[amlog_trace:320] echo '|2023-01-28T18:00:05.946742|INFO: rg_move|epprd_rg|epprda|1|RELEASE' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] 
ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. :get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move[76] version=%I% :rg_move[86] STATUS=0 :rg_move[88] [[ ! -n '' ]] :rg_move[90] EMULATE=REAL :rg_move[96] set -u :rg_move[98] NODENAME=epprda :rg_move[98] export NODENAME :rg_move[99] RGID=1 :rg_move[100] (( 3 == 3 )) :rg_move[102] ACTION=RELEASE :rg_move[108] : serial number for this event is 22169 :rg_move[112] RG_UP_POSTEVENT_ON_NODE=epprda :rg_move[112] export RG_UP_POSTEVENT_ON_NODE :rg_move[116] clodmget -qid=1 -f group -n HACMPgroup :rg_move[116] eval RGNAME=epprd_rg :rg_move[1] RGNAME=epprd_rg :rg_move[118] UPDATESTATD=0 :rg_move[119] export UPDATESTATD :rg_move[123] RG_MOVE_EVENT=true :rg_move[123] export RG_MOVE_EVENT :rg_move[128] group_state='$RESGRP_epprd_rg_epprda' :rg_move[129] set +u :rg_move[130] eval print '$RESGRP_epprd_rg_epprda' :rg_move[1] print ONLINE :rg_move[130] RG_MOVE_ONLINE=ONLINE :rg_move[130] export RG_MOVE_ONLINE :rg_move[131] set -u :rg_move[132] RG_MOVE_ONLINE=ONLINE :rg_move[139] rm -f /tmp/.NFSSTOPPED :rg_move[140] rm -f /tmp/.RPCLOCKDSTOPPED :rg_move[147] set -a :rg_move[148] clsetenvgrp epprda rg_move epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 :rg_move[148] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[149] RC=0 :rg_move[150] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[1] FORCEDOWN_GROUPS='' :rg_move[2] RESOURCE_GROUPS='' :rg_move[3] HOMELESS_GROUPS='' :rg_move[4] HOMELESS_FOLLOWER_GROUPS='' :rg_move[5] ERRSTATE_GROUPS='' :rg_move[6] PRINCIPAL_ACTIONS='' :rg_move[7] ASSOCIATE_ACTIONS='' :rg_move[8] AUXILLIARY_ACTIONS='' :rg_move[8] SIBLING_GROUPS='' :rg_move[9] SIBLING_NODES_BY_GROUP='' :rg_move[10] SIBLING_ACQUIRING_GROUPS='' :rg_move[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' :rg_move[12] SIBLING_RELEASING_GROUPS='' :rg_move[13] SIBLING_RELEASING_NODES_BY_GROUP='' :rg_move[151] set +a :rg_move[155] (( 0 != 0 )) :rg_move[155] [[ -z epprd_rg ]] :rg_move[164] [[ -z TRUE ]] :rg_move[241] AM_SYNC_CALLED_BY=RG_MOVE :rg_move[241] export AM_SYNC_CALLED_BY :rg_move[242] process_resources :process_resources[3318] 
version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-01-28T18:00:06.069058 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=RELEASE RESOURCE_GROUPS='"epprd_rg"' PRINCIPAL_ACTION='"RELEASE"' AUXILLIARY_ACTION='"NONE"' :process_resources[1] JOB_TYPE=RELEASE :process_resources[1] RESOURCE_GROUPS=epprd_rg :process_resources[1] PRINCIPAL_ACTION=RELEASE :process_resources[1] AUXILLIARY_ACTION=NONE :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 )) :process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3363] INFO_STRING='' +epprd_rg:process_resources[3364] clnodename +epprd_rg:process_resources[3373] ENV_VAR=GROUP_epprd_rg_epprda +epprd_rg:process_resources[3374] eval 'echo $GROUP_epprd_rg_epprda' +epprd_rg:process_resources[1] echo ISUPPREEVENT +epprd_rg:process_resources[3374] read ENV_VAR +epprd_rg:process_resources[3375] [[ ISUPPREEVENT == WILLBEUPPOSTEVENT ]] +epprd_rg:process_resources[3379] [[ ISUPPREEVENT == ISUPPREEVENT ]] +epprd_rg:process_resources[3380] INFO_STRING='|SOURCE=epprda' +epprd_rg:process_resources[3381] IS_SERVICE_START=0 +epprd_rg:process_resources[3373] ENV_VAR=GROUP_epprd_rg_epprds +epprd_rg:process_resources[3374] eval 'echo $GROUP_epprd_rg_epprds' +epprd_rg:process_resources[1] echo +epprd_rg:process_resources[3374] read ENV_VAR +epprd_rg:process_resources[3375] [[ '' == WILLBEUPPOSTEVENT ]] +epprd_rg:process_resources[3379] [[ '' == ISUPPREEVENT ]] +epprd_rg:process_resources[3384] (( 0 == 0 && 1 ==0 )) +epprd_rg:process_resources[3660] set_resource_group_state RELEASING +epprd_rg:process_resources[set_resource_group_state:82] PS4_FUNC=set_resource_group_state +epprd_rg:process_resources[set_resource_group_state:82] typeset PS4_FUNC +epprd_rg:process_resources[set_resource_group_state:83] [[ high == high ]] +epprd_rg:process_resources[set_resource_group_state:83] set -x +epprd_rg:process_resources[set_resource_group_state:84] STAT=0 +epprd_rg:process_resources[set_resource_group_state:85] new_status=RELEASING +epprd_rg:process_resources[set_resource_group_state:89] export GROUPNAME +epprd_rg:process_resources[set_resource_group_state:90] [[ RELEASING != DOWN ]] +epprd_rg:process_resources[set_resource_group_state:92] clchdaemons -d clstrmgr_scripts -t resource_locator -n epprda -o epprd_rg -v RELEASING +epprd_rg:process_resources[set_resource_group_state:100] : Resource Manager Updates +epprd_rg:process_resources[set_resource_group_state:111] amlog_trace '' 'acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:318] clcycle clavailability.log +epprd_rg:process_resources[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:process_resources[amlog_trace:319] cltime +epprd_rg:process_resources[amlog_trace:319] DATE=2023-01-28T18:00:06.114547 +epprd_rg:process_resources[amlog_trace:320] echo '|2023-01-28T18:00:06.114547|INFO: 
acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:process_resources[set_resource_group_state:112] cl_RMupdate releasing epprd_rg process_resources 2023-01-28T18:00:06.139042 2023-01-28T18:00:06.143586 +epprd_rg:process_resources[set_resource_group_state:153] return 0 +epprd_rg:process_resources[3661] RC=0 +epprd_rg:process_resources[3662] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:00:06.155780 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=APPLICATIONS ACTION=RELEASE ALL_APPLICATIONS='"epprd_app"' RESOURCE_GROUPS='"epprd_rg' '"' MISCDATA='""' +epprd_rg:process_resources[1] JOB_TYPE=APPLICATIONS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] ALL_APPLICATIONS=epprd_app +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] MISCDATA='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ APPLICATIONS == RELEASE ]] +epprd_rg:process_resources[3360] [[ APPLICATIONS == ONLINE ]] +epprd_rg:process_resources[3549] process_applications RELEASE +epprd_rg:process_resources[process_applications:312] PS4_FUNC=process_applications +epprd_rg:process_resources[process_applications:312] typeset PS4_FUNC +epprd_rg:process_resources[process_applications:313] [[ high == high ]] +epprd_rg:process_resources[process_applications:313] set -x +epprd_rg:process_resources[process_applications:316] : Each subprocess will log to a file with this name and PID +epprd_rg:process_resources[process_applications:318] TMP_FILE=/var/hacmp/log/.process_resources_applications.21954878 +epprd_rg:process_resources[process_applications:318] export TMP_FILE +epprd_rg:process_resources[process_applications:320] rm -f '/var/hacmp/log/.process_resources_applications*' +epprd_rg:process_resources[process_applications:322] WAITPIDS='' +epprd_rg:process_resources[process_applications:323] LPAR_ACQUIRE_FAILED=0 +epprd_rg:process_resources[process_applications:324] LPAR_RELEASE_FAILED=0 +epprd_rg:process_resources[process_applications:325] START_STOP_FAILED=0 +epprd_rg:process_resources[process_applications:326] LIST_OF_APPS=epprd_app +epprd_rg:process_resources[process_applications:329] : Acquire lpar resources in one-shot before starting applications +epprd_rg:process_resources[process_applications:331] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[process_applications:343] (( LPAR_ACQUIRE_FAILED == 0 )) +epprd_rg:process_resources[process_applications:345] : Loop through all groups to start or stop applications +epprd_rg:process_resources[process_applications:348] export GROUPNAME +epprd_rg:process_resources[process_applications:351] : Break out application data 
+epprd_rg:process_resources[process_applications:353] get_list_head epprd_app +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo epprd_app +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo epprd_app +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[process_applications:353] read LIST_OF_APPLICATIONS_FOR_RG +epprd_rg:process_resources[process_applications:354] get_list_tail epprd_app +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo epprd_app +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[process_applications:354] read ALL_APPLICATIONS +epprd_rg:process_resources[process_applications:356] get_list_head +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[process_applications:356] read MISCDATA_FOR_RG +epprd_rg:process_resources[process_applications:357] get_list_tail +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[process_applications:357] read MISCDATA +epprd_rg:process_resources[process_applications:359] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[process_applications:363] TMPLIST='' +epprd_rg:process_resources[process_applications:364] print epprd_app +epprd_rg:process_resources[process_applications:364] set -A appnames epprd_app +epprd_rg:process_resources[process_applications:366] (( cnt=0)) +epprd_rg:process_resources[process_applications:366] (( cnt < 1)) +epprd_rg:process_resources[process_applications:367] TMPLIST='epprd_app ' +epprd_rg:process_resources[process_applications:368] LIST_OF_APPLICATIONS_FOR_RG=epprd_app +epprd_rg:process_resources[process_applications:366] ((cnt++ )) +epprd_rg:process_resources[process_applications:366] (( cnt < 1)) +epprd_rg:process_resources[process_applications:371] LIST_OF_APPLICATIONS_FOR_RG='epprd_app ' +epprd_rg:process_resources[process_applications:374] APPLICATIONS='epprd_app ' 
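[Annotation] The get_list_head and get_list_tail helpers traced above split the colon-separated lists clRGPA hands to process_resources: the head is the first colon-delimited field with commas turned into spaces, the tail is the remainder. A condensed sketch mirroring the echo/read/tr steps in the trace:

    function get_list_head {
        echo "$1" | IFS=: read listhead listtail
        echo "$listhead" | tr , ' '
    }
    function get_list_tail {
        echo "$1" | IFS=: read listhead listtail
        echo "$listtail"
    }

    get_list_head epprd_app     # prints: epprd_app (single-item list)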
+epprd_rg:process_resources[process_applications:374] export APPLICATIONS +epprd_rg:process_resources[process_applications:375] MISC_DATA='' +epprd_rg:process_resources[process_applications:375] export MISC_DATA +epprd_rg:process_resources[process_applications:378] : Now call start_or_stop_applications_for_rg to do the app start/stop. +epprd_rg:process_resources[process_applications:381] start_or_stop_applications_for_rg RELEASE /var/hacmp/log/.process_resources_applications.21954878.epprd_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:248] PS4_FUNC=start_or_stop_applications_for_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:248] typeset PS4_FUNC +epprd_rg:process_resources[start_or_stop_applications_for_rg:249] [[ high == high ]] +epprd_rg:process_resources[start_or_stop_applications_for_rg:249] set -x +epprd_rg:process_resources[start_or_stop_applications_for_rg:251] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[start_or_stop_applications_for_rg:255] cmd_to_execute=stop_server +epprd_rg:process_resources[start_or_stop_applications_for_rg:259] : File name to store our exit status +epprd_rg:process_resources[start_or_stop_applications_for_rg:261] STATUS_FILE=/var/hacmp/log/.process_resources_applications.21954878.epprd_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:264] : Use clcallev to run the event +epprd_rg:process_resources[start_or_stop_applications_for_rg:266] clcallev stop_server 'epprd_app ' +epprd_rg:process_resources[process_applications:384] : Add PID of the last bg start_or_stop_applications_for_rg process to WAITPIDS. +epprd_rg:process_resources[process_applications:386] WAITPIDS=' 26018292' +epprd_rg:process_resources[process_applications:390] : Wait for the start_or_stop_applications_for_rg PIDs to finish. +epprd_rg:process_resources[process_applications:393] wait 26018292 Jan 28 2023 18:00:06 EVENT START: stop_server epprd_app |2023-01-28T18:00:06|22169|EVENT START: stop_server epprd_app | +epprd_rg:stop_server[+59] version=%I% +epprd_rg:stop_server[+62] STATUS=0 +epprd_rg:stop_server[+66] [ ! 
-n ] +epprd_rg:stop_server[+68] EMULATE=REAL +epprd_rg:stop_server[+71] PROC_RES=false +epprd_rg:stop_server[+75] [[ APPLICATIONS != 0 ]] +epprd_rg:stop_server[+75] [[ APPLICATIONS != GROUP ]] +epprd_rg:stop_server[+76] PROC_RES=true +epprd_rg:stop_server[+79] typeset WPARNAME WPARDIR EXEC +epprd_rg:stop_server[+80] WPARDIR= +epprd_rg:stop_server[+81] EXEC= +epprd_rg:stop_server[+83] typeset -i rc=0 +epprd_rg:stop_server[+84] +epprd_rg:stop_server[+84] clwparname epprd_rg +epprd_rg:clwparname[38] version=1.3.1.1 +epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clwparname[44] [[ -z '' ]] +epprd_rg:clwparname[44] exit 0 WPARNAME= +epprd_rg:stop_server[+85] rc=0 +epprd_rg:stop_server[+87] set -u +epprd_rg:stop_server[+90] ALLSERVERS=All_servers +epprd_rg:stop_server[+91] [ REAL = EMUL ] +epprd_rg:stop_server[+96] cl_RMupdate resource_releasing All_servers stop_server 2023-01-28T18:00:06.306007 2023-01-28T18:00:06.310267 +epprd_rg:stop_server[+101] (( 0 == 0 )) +epprd_rg:stop_server[+101] [[ -n ]] +epprd_rg:stop_server[+120] +epprd_rg:stop_server[+120] cllsserv -cn epprd_app +epprd_rg:stop_server[+120] cut -d: -f3 STOP=/etc/hacmp/epprd_stop.sh +epprd_rg:stop_server[+121] +epprd_rg:stop_server[+121] echo /etc/hacmp/epprd_stop.sh +epprd_rg:stop_server[+121] cut -d -f1 STOP_SCRIPT=/etc/hacmp/epprd_stop.sh +epprd_rg:stop_server[+123] PATTERN=epprda epprd_app +epprd_rg:stop_server[+123] [[ -n ]] +epprd_rg:stop_server[+123] [[ -z ]] +epprd_rg:stop_server[+123] [[ -x /etc/hacmp/epprd_stop.sh ]] +epprd_rg:stop_server[+133] [ REAL = EMUL ] +epprd_rg:stop_server[+139] amlog_trace Stopping application controller|epprd_app +epprd_rg:stop_server[+55] clcycle clavailability.log +epprd_rg:stop_server[+55] 1> /dev/null 2>& 1 +epprd_rg:stop_server[+55] +epprd_rg:stop_server[+55] cltime DATE=2023-01-28T18:00:06.345425 +epprd_rg:stop_server[+55] echo |2023-01-28T18:00:06.345425|INFO: Stopping application controller|epprd_app +epprd_rg:stop_server[+55] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:stop_server[+140] /etc/hacmp/epprd_stop.sh +epprd_rg:stop_server[+140] ODMDIR=/etc/objrepos +epprd_rg:stop_server[+141] rc=0 +epprd_rg:stop_server[+143] (( rc != 0 )) +epprd_rg:stop_server[+151] amlog_trace Stopping application controller|epprd_app +epprd_rg:stop_server[+55] clcycle clavailability.log +epprd_rg:stop_server[+55] 1> /dev/null 2>& 1 +epprd_rg:stop_server[+55] +epprd_rg:stop_server[+55] cltime DATE=2023-01-28T18:00:06.374249 +epprd_rg:stop_server[+55] echo |2023-01-28T18:00:06.374249|INFO: Stopping application controller|epprd_app +epprd_rg:stop_server[+55] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:stop_server[+174] ALLNOERRSERV=All_nonerror_servers +epprd_rg:stop_server[+175] [ REAL = EMUL ] +epprd_rg:stop_server[+180] cl_RMupdate resource_down All_nonerror_servers stop_server 2023-01-28T18:00:06.396665 2023-01-28T18:00:06.400904 +epprd_rg:stop_server[+183] exit 0 Jan 28 2023 18:00:06 EVENT COMPLETED: stop_server epprd_app 0 |2023-01-28T18:00:06|22169|EVENT COMPLETED: stop_server epprd_app 0| +epprd_rg:process_resources[start_or_stop_applications_for_rg:267] RC=0 +epprd_rg:process_resources[start_or_stop_applications_for_rg:269] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[start_or_stop_applications_for_rg:279] (( 0 != 0 )) +epprd_rg:process_resources[start_or_stop_applications_for_rg:291] : Store the result for later accumulation +epprd_rg:process_resources[start_or_stop_applications_for_rg:293] print 'epprd_rg 0' 
+epprd_rg:process_resources[start_or_stop_applications_for_rg:293] 1>> /var/hacmp/log/.process_resources_applications.21954878.epprd_rg +epprd_rg:process_resources[process_applications:396] : Look at all the status files to see if any were unsuccessful +epprd_rg:process_resources[process_applications:399] cat /var/hacmp/log/.process_resources_applications.21954878.epprd_rg +epprd_rg:process_resources[process_applications:399] read skip SUCCESS rest +epprd_rg:process_resources[process_applications:401] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:411] rm -f /var/hacmp/log/.process_resources_applications.21954878.epprd_rg +epprd_rg:process_resources[process_applications:416] : Release lpar resources in one-shot now that applications are stopped +epprd_rg:process_resources[process_applications:418] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[process_applications:420] GROUPNAME=epprd_rg +epprd_rg:process_resources[process_applications:420] export GROUPNAME +epprd_rg:process_resources[process_applications:421] clmanageroha -o release -s -l epprd_app +epprd_rg:process_resources[process_applications:421] 3>& 2 +epprd_rg:clmanageroha[318] : version='@(#)' 5881272 43haes/usr/sbin/cluster/events/clmanageroha.sh, 61aha_r726, 2205A_aha726, May 16 2022 12:15 PM +epprd_rg:clmanageroha[321] clodmget -n -f connection_type HACMPhmcparam +epprd_rg:clmanageroha[321] CONN_TYPE=0 +epprd_rg:clmanageroha[321] typeset -i CONN_TYPE +epprd_rg:clmanageroha[323] clodmget -q name='epprda and object like POWERVS_*' -nf name HACMPnode +epprd_rg:clmanageroha[323] 2> /dev/null +epprd_rg:clmanageroha[323] [[ -n '' ]] +epprd_rg:clmanageroha[326] export CONN_TYPE +epprd_rg:clmanageroha[331] roha_session_open -o release -s -l epprd_app +epprd_rg:clmanageroha[roha_session_open:131] roha_session.id=26018298 +epprd_rg:clmanageroha[roha_session_open:132] date +epprd_rg:clmanageroha[roha_session_open:132] LC_ALL=C +epprd_rg:clmanageroha[roha_session_open:132] roha_session_log 'Open session 26018298 at Sat Jan 28 18:00:06 KORST 2023' [ROHALOG:26018298:(0.066)] Open session 26018298 at Sat Jan 28 18:00:06 KORST 2023 +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:146] roha_session.operation=release +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:143] roha_session.systemmirror_mode=1 +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:149] roha_session.optimal_apps=epprd_app +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:163] [[ release != @(acquire|release|adjust) ]] +epprd_rg:clmanageroha[roha_session_open:168] no_roha_apps=0 +epprd_rg:clmanageroha[roha_session_open:168] typeset -i no_roha_apps +epprd_rg:clmanageroha[roha_session_open:169] need_explicit_res_rel=0 +epprd_rg:clmanageroha[roha_session_open:169] typeset -i need_explicit_res_rel +epprd_rg:clmanageroha[roha_session_open:187] [[ -n epprd_app ]] +epprd_rg:clmanageroha[roha_session_open:187] sort +epprd_rg:clmanageroha[roha_session_open:187] clmgr q roha +epprd_rg:clmanageroha[roha_session_open:187] uniq -d +epprd_rg:clmanageroha[roha_session_open:187] echo epprd_app +epprd_rg:clmanageroha[roha_session_open:187] sort -u +epprd_rg:clmanageroha[roha_session_open:187] echo '\nepprd_app' +epprd_rg:clmanageroha[roha_session_open:187] [[ -z '' ]] +epprd_rg:clmanageroha[roha_session_open:189] roha_session_log 'INFO: No 
ROHA configured on applications.\n' [ROHALOG:26018298:(0.519)] INFO: No ROHA configured on applications. [ROHALOG:26018298:(0.519)] +epprd_rg:clmanageroha[roha_session_open:190] no_roha_apps=1 +epprd_rg:clmanageroha[roha_session_open:195] read_tunables +epprd_rg:clmanageroha[roha_session_open:196] echo '' +epprd_rg:clmanageroha[roha_session_open:196] grep -q epprda +epprd_rg:clmanageroha[roha_session_open:197] (( 1 == 0 )) +epprd_rg:clmanageroha[roha_session_open:202] (( 1 == 1 )) +epprd_rg:clmanageroha[roha_session_open:203] roha_session_read_odm_dynresop DLPAR_MEM +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_MEM -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:203] (( 0 == 0.00 )) +epprd_rg:clmanageroha[roha_session_open:204] roha_session_read_odm_dynresop DLPAR_PROCS +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROCS -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:204] (( 0 == 0 )) +epprd_rg:clmanageroha[roha_session_open:205] roha_session_read_odm_dynresop DLPAR_PROC_UNITS +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROC_UNITS -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:205] (( 0 == 0.00 )) +epprd_rg:clmanageroha[roha_session_open:206] roha_session_log 'INFO: Nothing to be done.\n' [ROHALOG:26018298:(0.579)] INFO: Nothing to be done. 
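[Annotation] The release turns into a no-op because HACMPdynresop, which records what was acquired through DLPAR, holds no outstanding memory, processor, or processing-unit values. A sketch of that check, assuming only the three keys read above and a loop structure that is inferred rather than copied:

    # Sketch: skip the release when nothing was acquired via DLPAR.
    nothing_to_do=1
    for key in DLPAR_MEM DLPAR_PROCS DLPAR_PROC_UNITS; do
        val=$(clodmget -q key=$key -nf value HACMPdynresop)
        [[ -n $val && $val != 0 ]] && nothing_to_do=0   # something to give back
    done
    (( nothing_to_do )) && exit 0    # 'INFO: Nothing to be done.'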
[ROHALOG:26018298:(0.579)] +epprd_rg:clmanageroha[roha_session_open:207] exit 0 +epprd_rg:process_resources[process_applications:422] RC=0 +epprd_rg:process_resources[process_applications:423] (( 0 != 0 )) +epprd_rg:process_resources[process_applications:433] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:434] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:435] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:439] return 0 +epprd_rg:process_resources[3550] RC=0 +epprd_rg:process_resources[3551] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3553] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:00:07.084178 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=MOUNT_FILESYSTEMS ACTION=RELEASE FILE_SYSTEMS='"/board;/board_org"' RESOURCE_GROUPS='"epprd_rg' '"' NFS_NETWORKS='""' NFS_HOSTS='""' IP_LABELS='""' +epprd_rg:process_resources[1] JOB_TYPE=MOUNT_FILESYSTEMS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] FILE_SYSTEMS='/board;/board_org' +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] NFS_NETWORKS='' +epprd_rg:process_resources[1] NFS_HOSTS='' +epprd_rg:process_resources[1] IP_LABELS='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ MOUNT_FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources[3360] [[ MOUNT_FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources[3612] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[3616] unmount_nfs_filesystems +epprd_rg:process_resources[unmount_nfs_filesystems:1397] PS4_FUNC=unmount_nfs_filesystems +epprd_rg:process_resources[unmount_nfs_filesystems:1397] typeset PS4_FUNC +epprd_rg:process_resources[unmount_nfs_filesystems:1398] [[ high == high ]] +epprd_rg:process_resources[unmount_nfs_filesystems:1398] set -x +epprd_rg:process_resources[unmount_nfs_filesystems:1400] STAT=0 +epprd_rg:process_resources[unmount_nfs_filesystems:1402] cl_deactivate_nfs +epprd_rg:cl_deactivate_nfs[+75] [[ high == high ]] +epprd_rg:cl_deactivate_nfs[+75] version=1.2.5.1 $Source$ +epprd_rg:cl_deactivate_nfs[+77] STATUS=0 +epprd_rg:cl_deactivate_nfs[+78] PIDLIST= +epprd_rg:cl_deactivate_nfs[+80] set -u +epprd_rg:cl_deactivate_nfs[+154] PROC_RES=false +epprd_rg:cl_deactivate_nfs[+158] [[ MOUNT_FILESYSTEMS != 0 ]] +epprd_rg:cl_deactivate_nfs[+158] [[ MOUNT_FILESYSTEMS != GROUP ]] +epprd_rg:cl_deactivate_nfs[+159] PROC_RES=true +epprd_rg:cl_deactivate_nfs[+175] export GROUPNAME +epprd_rg:cl_deactivate_nfs[+175] [[ true == true ]] +epprd_rg:cl_deactivate_nfs[+178] get_list_head /board;/board_org +epprd_rg:cl_deactivate_nfs[+178] read UNSORTED_FILELIST +epprd_rg:cl_deactivate_nfs[+179] get_list_tail /board;/board_org +epprd_rg:cl_deactivate_nfs[+179] read 
FILE_SYSTEMS +epprd_rg:cl_deactivate_nfs[+186] +epprd_rg:cl_deactivate_nfs[+186] /bin/echo /board;/board_org +epprd_rg:cl_deactivate_nfs[+186] /bin/sort -r FILELIST=/board;/board_org +epprd_rg:cl_deactivate_nfs[+188] echo /board;/board_org +epprd_rg:cl_deactivate_nfs[+188] grep -q \;/ +epprd_rg:cl_deactivate_nfs[+189] CROSSMOUNT=1 +epprd_rg:cl_deactivate_nfs[+189] [[ 1 != 0 ]] +epprd_rg:cl_deactivate_nfs[+194] +epprd_rg:cl_deactivate_nfs[+194] /bin/echo /board;/board_org +epprd_rg:cl_deactivate_nfs[+194] /bin/sort -k 1,1r -t; MNT=/board;/board_org +epprd_rg:cl_deactivate_nfs[+200] ALLNFS=All_nfs_mounts +epprd_rg:cl_deactivate_nfs[+201] cl_RMupdate resource_releasing All_nfs_mounts cl_deactivate_nfs 2023-01-28T18:00:07.139671 2023-01-28T18:00:07.144938 +epprd_rg:cl_deactivate_nfs[+203] +epprd_rg:cl_deactivate_nfs[+203] odmget -q name=RECOVERY_METHOD AND group=epprd_rg HACMPresource +epprd_rg:cl_deactivate_nfs[+203] grep value +epprd_rg:cl_deactivate_nfs[+203] awk {print $3} +epprd_rg:cl_deactivate_nfs[+203] sed s/"//g METHOD=sequential +epprd_rg:cl_deactivate_nfs[+206] typeset PS4_LOOP=/board;/board_org +epprd_rg:cl_deactivate_nfs:/board;/board_org[+207] (( 1 != 0 )) +epprd_rg:cl_deactivate_nfs:/board;/board_org[+209] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+209] cut -f2 -d; +epprd_rg:cl_deactivate_nfs:/board;/board_org[+209] echo /board;/board_org fs=/board_org +epprd_rg:cl_deactivate_nfs:/board;/board_org[+210] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+210] cut -f1 -d; +epprd_rg:cl_deactivate_nfs:/board;/board_org[+210] echo /board;/board_org mnt=/board +epprd_rg:cl_deactivate_nfs:/board;/board_org[+220] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+220] awk -v MFS=/board BEGIN {MFS=sprintf("^%s$", MFS)} \ match($4, "nfs") && match($3, MFS) {print $2} +epprd_rg:cl_deactivate_nfs:/board;/board_org[+220] mount f=/board_org +epprd_rg:cl_deactivate_nfs:/board;/board_org[+220] [[ /board_org == /board_org ]] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+227] pid= +epprd_rg:cl_deactivate_nfs:/board;/board_org[+227] [[ sequential == sequential ]] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+227] [[ rg_move == node_down ]] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+227] [[ rg_move == rg_move ]] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+252] pid=27525458 +epprd_rg:cl_deactivate_nfs:/board;/board_org[+252] [[ -n 27525458 ]] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+251] do_umount /board +epprd_rg:cl_deactivate_nfs(0):/board;/board_org[do_umount+4] typeset fs=/board +epprd_rg:cl_deactivate_nfs(0):/board;/board_org[do_umount+31] cl_nfskill -k -u /board +epprd_rg:cl_deactivate_nfs:/board;/board_org[+264] grep -qw 27525458 +epprd_rg:cl_deactivate_nfs:/board;/board_org[+264] echo +epprd_rg:cl_deactivate_nfs:/board;/board_org[+267] (( 1 != 0 )) +epprd_rg:cl_deactivate_nfs:/board;/board_org[+268] PIDLIST= 27525458 +epprd_rg:cl_deactivate_nfs:/board;/board_org[+274] unset PS4_LOOP +epprd_rg:cl_deactivate_nfs[+279] wait 27525458 +epprd_rg:cl_deactivate_nfs(0):/board;/board_org[do_umount+33] sleep 2 +epprd_rg:cl_deactivate_nfs(2):/board;/board_org[do_umount+34] cl_nfskill -k -u /board +epprd_rg:cl_deactivate_nfs(2):/board;/board_org[do_umount+36] sleep 2 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+39] amlog_trace Deactivating NFS|/board +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] clcycle clavailability.log +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] 1> /dev/null 2>& 1 
+epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] cltime DATE=2023-01-28T18:00:11.194772 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] echo |2023-01-28T18:00:11.194772|INFO: Deactivating NFS|/board +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+40] typeset COUNT=20 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+41] true +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+42] date +%h %d %H:%M:%S.000 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+42] : Attempt 1 of 20 to unmount at Jan 28 18:00:11.000 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+43] umount -f /board forced unmount of /board +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+44] (( 0 != 0 )) +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+61] amlog_trace Deactivating NFS|/board +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] clcycle clavailability.log +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] cltime DATE=2023-01-28T18:00:11.235467 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] echo |2023-01-28T18:00:11.235467|INFO: Deactivating NFS|/board +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+62] break +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+65] return 0 +epprd_rg:cl_deactivate_nfs[+280] (( 0 != 0 )) +epprd_rg:cl_deactivate_nfs[+291] ALLNOERRNFS=All_nonerror_nfs_mounts +epprd_rg:cl_deactivate_nfs[+292] cl_RMupdate resource_down All_nonerror_nfs_mounts cl_deactivate_nfs 2023-01-28T18:00:11.258600 2023-01-28T18:00:11.262894 +epprd_rg:cl_deactivate_nfs[+295] exit 0 +epprd_rg:process_resources[unmount_nfs_filesystems:1403] RC=0 +epprd_rg:process_resources[unmount_nfs_filesystems:1406] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[unmount_nfs_filesystems:1420] (( 0 != 0 )) +epprd_rg:process_resources[unmount_nfs_filesystems:1426] return 0 +epprd_rg:process_resources[3617] RC=0 +epprd_rg:process_resources[3618] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3620] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:00:11.275642 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=EXPORT_FILESYSTEMS ACTION=RELEASE EXPORT_FILE_SYSTEMS='"/board_org"' EXPORT_FILE_SYSTEMS_V4='""' RESOURCE_GROUPS='"epprd_rg' '"' STABLE_STORAGE_PATH='""' IP_LABELS='""' DAEMONS='"NFS' '"' +epprd_rg:process_resources[1] JOB_TYPE=EXPORT_FILESYSTEMS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] EXPORT_FILE_SYSTEMS=/board_org +epprd_rg:process_resources[1] EXPORT_FILE_SYSTEMS_V4='' +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] STABLE_STORAGE_PATH='' 
+epprd_rg:process_resources[1] IP_LABELS='' +epprd_rg:process_resources[1] DAEMONS='NFS ' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ EXPORT_FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources[3360] [[ EXPORT_FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources[3595] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[3599] unexport_filesystems +epprd_rg:process_resources[unexport_filesystems:1576] PS4_FUNC=unexport_filesystems +epprd_rg:process_resources[unexport_filesystems:1576] typeset PS4_FUNC +epprd_rg:process_resources[unexport_filesystems:1577] [[ high == high ]] +epprd_rg:process_resources[unexport_filesystems:1577] set -x +epprd_rg:process_resources[unexport_filesystems:1578] STAT=0 +epprd_rg:process_resources[unexport_filesystems:1579] NFSSTOPPED=0 +epprd_rg:process_resources[unexport_filesystems:1580] RPCSTOPPED=0 +epprd_rg:process_resources[unexport_filesystems:1582] export NFSSTOPPED +epprd_rg:process_resources[unexport_filesystems:1585] : For NFSv4, cl_unexport_fs will use STABLE_STORAGE_PATH, which is set by +epprd_rg:process_resources[unexport_filesystems:1586] : clRGPA and can have colon-separated values for multiple RGs. +epprd_rg:process_resources[unexport_filesystems:1587] : We will save off clRGPA values in stable_storage_path and then extract +epprd_rg:process_resources[unexport_filesystems:1588] : each RG into STABLE_STORAGE_PATH for cl_unexport_fs. 
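[Annotation] As the comment traces above explain, clRGPA passes colon-separated values with one field per resource group and commas between items within a field. get_list_head and get_list_tail peel off one group's slice at a time; the idiom they trace reduces to:

    # Sketch of the get_list_head / get_list_tail idiom (ksh: the last
    # pipeline stage runs in the current shell, so read sets variables).
    LIST='/board_org:/hypothetical_second_rg_fs'
    echo "$LIST" | IFS=: read listhead listtail
    echo "$listhead" | tr ',' ' '   # head: this RG's items, space separated
    echo "$listtail"                # tail: remainder for the next RG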
+epprd_rg:process_resources[unexport_filesystems:1590] stable_storage_path='' +epprd_rg:process_resources[unexport_filesystems:1590] typeset stable_storage_path +epprd_rg:process_resources[unexport_filesystems:1594] export GROUPNAME +epprd_rg:process_resources[unexport_filesystems:1596] get_list_head /board_org +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo /board_org +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo /board_org +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[unexport_filesystems:1596] read LIST_OF_EXPORT_FILE_SYSTEMS_FOR_RG +epprd_rg:process_resources[unexport_filesystems:1597] get_list_tail /board_org +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo /board_org +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[unexport_filesystems:1597] read EXPORT_FILE_SYSTEMS +epprd_rg:process_resources[unexport_filesystems:1599] get_list_head +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[unexport_filesystems:1599] read LIST_OF_EXPORT_FILE_SYSTEMS_V4_FOR_RG +epprd_rg:process_resources[unexport_filesystems:1600] get_list_tail +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[unexport_filesystems:1600] read EXPORT_FILE_SYSTEMS_V4 +epprd_rg:process_resources[unexport_filesystems:1601] get_list_head +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo +epprd_rg:process_resources[get_list_head:62] tr , ' ' 
+epprd_rg:process_resources[unexport_filesystems:1601] read STABLE_STORAGE_PATH +epprd_rg:process_resources[unexport_filesystems:1602] get_list_tail +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[unexport_filesystems:1602] read stable_storage_path +epprd_rg:process_resources[unexport_filesystems:1604] cl_unexport_fs /board_org '' +epprd_rg:cl_unexport_fs[136] version=%I% +epprd_rg:cl_unexport_fs[139] . /usr/es/sbin/cluster/events/utils/cl_nfs_utils +epprd_rg:cl_unexport_fs[98] PROGNAME=cl_unexport_fs +epprd_rg:cl_unexport_fs[99] [[ high == high ]] +epprd_rg:cl_unexport_fs[101] set -x +epprd_rg:cl_unexport_fs[102] version=%I +epprd_rg:cl_unexport_fs[105] cl_exports_data='' +epprd_rg:cl_unexport_fs[105] typeset cl_exports_data +epprd_rg:cl_unexport_fs[106] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_unexport_fs[141] UNEXPORT_V3=/board_org +epprd_rg:cl_unexport_fs[142] UNEXPORT_V4='' +epprd_rg:cl_unexport_fs[144] STATUS=0 +epprd_rg:cl_unexport_fs[146] PROC_RES=false +epprd_rg:cl_unexport_fs[150] [[ EXPORT_FILESYSTEMS != 0 ]] +epprd_rg:cl_unexport_fs[150] [[ EXPORT_FILESYSTEMS != GROUP ]] +epprd_rg:cl_unexport_fs[151] PROC_RES=true +epprd_rg:cl_unexport_fs[154] set -u +epprd_rg:cl_unexport_fs[156] (( 2 != 2 )) +epprd_rg:cl_unexport_fs[162] [[ __AIX__ == __AIX__ ]] +epprd_rg:cl_unexport_fs[164] oslevel -r +epprd_rg:cl_unexport_fs[164] cut -c1-2 +epprd_rg:cl_unexport_fs[164] (( 72 > 52 )) +epprd_rg:cl_unexport_fs[166] FORCE=-F +epprd_rg:cl_unexport_fs[180] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_unexport_fs[181] DARE_EVENT=reconfig_resource_release +epprd_rg:cl_unexport_fs[184] unexport_v4='' +epprd_rg:cl_unexport_fs[185] [[ -z '' ]] +epprd_rg:cl_unexport_fs[185] [[ rg_move == reconfig_resource_release ]] +epprd_rg:cl_unexport_fs[196] [[ -z '' ]] +epprd_rg:cl_unexport_fs[196] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_unexport_fs[198] unexport_v3='' +epprd_rg:cl_unexport_fs[204] getline_exports /board_org +epprd_rg:cl_unexport_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_unexport_fs[getline_exports:45] line='' +epprd_rg:cl_unexport_fs[getline_exports:45] typeset line +epprd_rg:cl_unexport_fs[getline_exports:46] flag=0 +epprd_rg:cl_unexport_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_unexport_fs[getline_exports:47] fs=/board_org +epprd_rg:cl_unexport_fs[getline_exports:49] [[ -z /board_org ]] +epprd_rg:cl_unexport_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_unexport_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_unexport_fs[getline_exports:56] read -r line +epprd_rg:cl_unexport_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_unexport_fs[getline_exports:68] echo '/sapmnt/EPP 
-sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_unexport_fs[getline_exports:69] (( 1 == 0 )) +epprd_rg:cl_unexport_fs[getline_exports:74] [[ 0 == 1 ]] +epprd_rg:cl_unexport_fs[getline_exports:56] read -r line +epprd_rg:cl_unexport_fs[getline_exports:89] return 0 +epprd_rg:cl_unexport_fs[205] export_line='' +epprd_rg:cl_unexport_fs[210] echo +epprd_rg:cl_unexport_fs[210] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_unexport_fs[211] cut -d- -f2- +epprd_rg:cl_unexport_fs[211] tr , ' ' +epprd_rg:cl_unexport_fs[210] options='' +epprd_rg:cl_unexport_fs[217] vers_missing=1 +epprd_rg:cl_unexport_fs[240] (( vers_missing )) +epprd_rg:cl_unexport_fs[240] unexport_v3=' /board_org' +epprd_rg:cl_unexport_fs[243] UNEXPORT_V3=' /board_org' +epprd_rg:cl_unexport_fs[244] UNEXPORT_V4='' +epprd_rg:cl_unexport_fs[247] hasrv='' +epprd_rg:cl_unexport_fs[249] [[ -z '' ]] +epprd_rg:cl_unexport_fs[251] query=name='STABLE_STORAGE_PATH AND group=epprd_rg' +epprd_rg:cl_unexport_fs[252] odmget -q name='STABLE_STORAGE_PATH AND group=epprd_rg' HACMPresource +epprd_rg:cl_unexport_fs[252] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_unexport_fs[252] STABLE_STORAGE_PATH='' +epprd_rg:cl_unexport_fs[256] [[ -z '' ]] +epprd_rg:cl_unexport_fs[258] STABLE_STORAGE_PATH=/var/adm/nfsv4.hacmp/epprd_rg +epprd_rg:cl_unexport_fs[261] [[ -z '' ]] +epprd_rg:cl_unexport_fs[263] query=name='SERVICE_LABEL AND group=epprd_rg' +epprd_rg:cl_unexport_fs[264] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_unexport_fs[264] odmget -q name='SERVICE_LABEL AND group=epprd_rg' HACMPresource +epprd_rg:cl_unexport_fs[264] SERVICE_LABEL=epprd +epprd_rg:cl_unexport_fs[268] ps -eo args +epprd_rg:cl_unexport_fs[268] grep -w nfsd +epprd_rg:cl_unexport_fs[268] grep -qw -- '-gp on' +epprd_rg:cl_unexport_fs[272] gp=off +epprd_rg:cl_unexport_fs[275] /usr/sbin/bootinfo -K +epprd_rg:cl_unexport_fs[275] KERNEL_BITS=64 +epprd_rg:cl_unexport_fs[277] [[ off == on ]] +epprd_rg:cl_unexport_fs[282] NFSv4_REGISTERED=0 +epprd_rg:cl_unexport_fs[286] V3=:2:3 +epprd_rg:cl_unexport_fs[287] V4=:4 +epprd_rg:cl_unexport_fs[289] [[ rg_move != reconfig_resource_release ]] +epprd_rg:cl_unexport_fs[290] [[ rg_move != release_vg_fs ]] +epprd_rg:cl_unexport_fs[298] [[ -n '' ]] +epprd_rg:cl_unexport_fs[321] V3='' +epprd_rg:cl_unexport_fs[322] V4='' +epprd_rg:cl_unexport_fs[326] ALLEXPORTS=All_exports +epprd_rg:cl_unexport_fs[328] cl_RMupdate resource_releasing All_exports cl_unexport_fs 2023-01-28T18:00:11.504811 2023-01-28T18:00:11.509091 +epprd_rg:cl_unexport_fs[330] tr ' ' '\n' +epprd_rg:cl_unexport_fs[330] echo /board_org +epprd_rg:cl_unexport_fs[330] sort +epprd_rg:cl_unexport_fs[330] FILESYSTEM_LIST=/board_org +epprd_rg:cl_unexport_fs[334] v3='' +epprd_rg:cl_unexport_fs[335] v4='' +epprd_rg:cl_unexport_fs[336] root='' +epprd_rg:cl_unexport_fs[337] old_options='' +epprd_rg:cl_unexport_fs[338] new_options='' +epprd_rg:cl_unexport_fs[340] exportfs +epprd_rg:cl_unexport_fs[340] grep '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_unexport_fs[340] export_line='/board_org -root=epprd:epprda:epprds' +epprd_rg:cl_unexport_fs[342] [[ -z '/board_org -root=epprd:epprda:epprds' ]] +epprd_rg:cl_unexport_fs[344] echo /board_org -root=epprd:epprda:epprds +epprd_rg:cl_unexport_fs[344] cut '-d ' -f2- +epprd_rg:cl_unexport_fs[344] cut -d- -f2- +epprd_rg:cl_unexport_fs[344] tr , ' ' +epprd_rg:cl_unexport_fs[344] 
old_options=root=epprd:epprda:epprds +epprd_rg:cl_unexport_fs[365] new_options=,root=epprd:epprda:epprds +epprd_rg:cl_unexport_fs[371] [[ -z '' ]] +epprd_rg:cl_unexport_fs[371] v3='' +epprd_rg:cl_unexport_fs[377] NFS_VER3='' +epprd_rg:cl_unexport_fs[380] [[ /board_org == /board_org ]] +epprd_rg:cl_unexport_fs[380] v3='' +epprd_rg:cl_unexport_fs[380] NFS_VER3=3 +epprd_rg:cl_unexport_fs[380] break +epprd_rg:cl_unexport_fs[382] NFS_VER4='' +epprd_rg:cl_unexport_fs[387] [[ '' == 4 ]] +epprd_rg:cl_unexport_fs[400] echo '' +epprd_rg:cl_unexport_fs[400] cut -d: -f2- +epprd_rg:cl_unexport_fs[400] vers='' +epprd_rg:cl_unexport_fs[402] [[ -z '' ]] +epprd_rg:cl_unexport_fs[404] [[ '' == 4 ]] +epprd_rg:cl_unexport_fs[408] exportfs -i -u -F /board_org exportfs: unexported /board_org +epprd_rg:cl_unexport_fs[410] (( 0 != 0 )) +epprd_rg:cl_unexport_fs[417] continue +epprd_rg:cl_unexport_fs[452] [[ -n '' ]] +epprd_rg:cl_unexport_fs[480] ALLNOERREXPORT=All_nonerror_exports +epprd_rg:cl_unexport_fs[482] cl_RMupdate resource_down All_nonerror_exports cl_unexport_fs 2023-01-28T18:00:11.566842 2023-01-28T18:00:11.571199 +epprd_rg:cl_unexport_fs[484] exit 0 +epprd_rg:process_resources[unexport_filesystems:1608] return 0 +epprd_rg:process_resources[3600] RC=0 +epprd_rg:process_resources[3601] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3603] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:00:11.584168 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=FILESYSTEMS ACTION=RELEASE FILE_SYSTEMS='"/usr/sap,/sapmnt,/oracle/EPP/sapdata4,/oracle/EPP/sapdata3,/oracle/EPP/sapdata2,/oracle/EPP/sapdata1,/oracle/EPP/origlogB,/oracle/EPP/origlogA,/oracle/EPP/oraarch,/oracle/EPP/mirrlogB,/oracle/EPP/mirrlogA,/oracle/EPP,/oracle,/board_org"' RESOURCE_GROUPS='"epprd_rg' '"' FSCHECK_TOOLS='""' RECOVERY_METHODS='"sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential"' +epprd_rg:process_resources[1] JOB_TYPE=FILESYSTEMS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] FILE_SYSTEMS=/usr/sap,/sapmnt,/oracle/EPP/sapdata4,/oracle/EPP/sapdata3,/oracle/EPP/sapdata2,/oracle/EPP/sapdata1,/oracle/EPP/origlogB,/oracle/EPP/origlogA,/oracle/EPP/oraarch,/oracle/EPP/mirrlogB,/oracle/EPP/mirrlogA,/oracle/EPP,/oracle,/board_org +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] FSCHECK_TOOLS='' +epprd_rg:process_resources[1] RECOVERY_METHODS=sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ FILESYSTEMS == RELEASE ]] 
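[Annotation] Every job in this log follows the same driver pattern: process_resources loops, asks clRGPA for the next job, and evals its output under set -a so the job variables are exported to the handlers. A condensed sketch; the handler names are those seen in the trace, and ending the loop on an unrecognized job type is an assumption:

    # Condensed sketch of the process_resources driver loop.
    while true; do
        set -a
        eval "$(clRGPA)"     # emits e.g. JOB_TYPE=FILESYSTEMS ACTION=RELEASE ...
        set +a
        case $JOB_TYPE in
            MOUNT_FILESYSTEMS)  [[ $ACTION == RELEASE ]] && unmount_nfs_filesystems ;;
            EXPORT_FILESYSTEMS) [[ $ACTION == RELEASE ]] && unexport_filesystems ;;
            FILESYSTEMS)        process_file_systems $ACTION ;;
            *)                  break ;;   # assumed terminal job type
        esac
    done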
+epprd_rg:process_resources[3360] [[ FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources[3482] process_file_systems RELEASE +epprd_rg:process_resources[process_file_systems:2640] PS4_FUNC=process_file_systems +epprd_rg:process_resources[process_file_systems:2640] typeset PS4_FUNC +epprd_rg:process_resources[process_file_systems:2641] [[ high == high ]] +epprd_rg:process_resources[process_file_systems:2641] set -x +epprd_rg:process_resources[process_file_systems:2643] STAT=0 +epprd_rg:process_resources[process_file_systems:2645] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[process_file_systems:2667] cl_deactivate_fs +epprd_rg:cl_deactivate_fs[860] version=1.6 +epprd_rg:cl_deactivate_fs[863] STATUS=0 +epprd_rg:cl_deactivate_fs[863] typeset -li STATUS +epprd_rg:cl_deactivate_fs[864] SLEEP=1 +epprd_rg:cl_deactivate_fs[864] typeset -li SLEEP +epprd_rg:cl_deactivate_fs[865] LIMIT=60 +epprd_rg:cl_deactivate_fs[865] typeset -li LIMIT +epprd_rg:cl_deactivate_fs[866] export SLEEP +epprd_rg:cl_deactivate_fs[867] export LIMIT +epprd_rg:cl_deactivate_fs[868] TMP_FILENAME=_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[870] (( 0 != 0 )) +epprd_rg:cl_deactivate_fs[875] OEM_CALL=false +epprd_rg:cl_deactivate_fs[879] : Check here to see if the forced unmount option can be used +epprd_rg:cl_deactivate_fs[881] FORCE_OK='' +epprd_rg:cl_deactivate_fs[881] export FORCE_OK +epprd_rg:cl_deactivate_fs[882] O_FlAG='' +epprd_rg:cl_deactivate_fs[882] export O_FlAG +epprd_rg:cl_deactivate_fs[885] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_deactivate_fs[886] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_deactivate_fs[887] : 99.99.999.999 +epprd_rg:cl_deactivate_fs[889] typeset -li V R M F +epprd_rg:cl_deactivate_fs[890] typeset -Z2 R +epprd_rg:cl_deactivate_fs[891] typeset -Z3 M +epprd_rg:cl_deactivate_fs[892] typeset -Z3 F +epprd_rg:cl_deactivate_fs[893] jfs2_lvl=601002000 +epprd_rg:cl_deactivate_fs[893] typeset -li jfs2_lvl +epprd_rg:cl_deactivate_fs[894] fuser_lvl=601004000 +epprd_rg:cl_deactivate_fs[894] typeset -li fuser_lvl +epprd_rg:cl_deactivate_fs[895] VRMF=0 +epprd_rg:cl_deactivate_fs[895] typeset -li VRMF +epprd_rg:cl_deactivate_fs[898] : Here try and figure out what level of JFS2 is installed +epprd_rg:cl_deactivate_fs[900] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_deactivate_fs[900] cut -f3 -d: +epprd_rg:cl_deactivate_fs[900] read V R M F +epprd_rg:cl_deactivate_fs[900] IFS=. 
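[Annotation] The typeset -Z padding above is what makes the level check safe: R is zero-filled to two digits, M and F to three, so V.R.M.F packs into one integer that compares numerically (maximum 99.99.999.999). Worked through for the level found in this trace:

    # Sketch: pack V.R.M.F into a single comparable integer, as set up above.
    typeset -li V R M F VRMF
    typeset -Z2 R
    typeset -Z3 M F
    lslpp -lcqOr bos.rte.filesystem | cut -f3 -d: | IFS=. read V R M F
    VRMF=$V$R$M$F                      # 7.2.5.102 -> 702005102
    (( VRMF >= 601002000 )) && FORCE_OK=true   # JFS2 supports forced unmount
    (( VRMF >= 601004000 )) && O_FLAG=-O       # fuser supports the -O flag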
+epprd_rg:cl_deactivate_fs[901] VRMF=702005102 +epprd_rg:cl_deactivate_fs[903] (( 702005102 >= 601002000 )) +epprd_rg:cl_deactivate_fs[906] : JFS2 at this level that supports forced unmount +epprd_rg:cl_deactivate_fs[908] FORCE_OK=true +epprd_rg:cl_deactivate_fs[911] (( 702005102 >= 601004000 )) +epprd_rg:cl_deactivate_fs[914] : fuser at this level supports the -O flag +epprd_rg:cl_deactivate_fs[916] O_FLAG=-O +epprd_rg:cl_deactivate_fs[920] : if JOB_TYPE is set and is not GROUP, then process_resources is parent +epprd_rg:cl_deactivate_fs[922] [[ FILESYSTEMS != 0 ]] +epprd_rg:cl_deactivate_fs[922] [[ FILESYSTEMS != GROUP ]] +epprd_rg:cl_deactivate_fs[923] deactivate_fs_process_resources +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:705] STATUS=0 +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:705] typeset -li STATUS +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:708] : for the temp file, just take the first rg name +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:710] cut -f 1 -d ' ' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:710] print epprd_rg +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:710] read RES_GRP +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:711] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:714] : Remove the status file if already exists +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:716] rm -f /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:719] : go through all resource groups +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:721] pid_list='' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:724] export GROUPNAME +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:725] export RECOVERY_METHOD +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:728] : Get a reverse sorted list of the filesystems in this RG so that they +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:729] : release in opposite order of mounting. This is needed for nested mounts. 
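[Annotation] Reverse lexical order is what guarantees a child file system is released before its parent, e.g. /oracle/EPP/sapdata1 ahead of /oracle/EPP ahead of /oracle. The transformation traced below reduces to:

    # Sketch: reverse-sort mount points so nested mounts release first.
    print '/oracle,/oracle/EPP,/oracle/EPP/sapdata1,/usr/sap' | tr ',' '\n' | sort -ru
    # -> /usr/sap  /oracle/EPP/sapdata1  /oracle/EPP  /oracle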
+epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:731] print /usr/sap,/sapmnt,/oracle/EPP/sapdata4,/oracle/EPP/sapdata3,/oracle/EPP/sapdata2,/oracle/EPP/sapdata1,/oracle/EPP/origlogB,/oracle/EPP/origlogA,/oracle/EPP/oraarch,/oracle/EPP/mirrlogB,/oracle/EPP/mirrlogA,/oracle/EPP,/oracle,/board_org +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:731] read LIST_OF_FILE_SYSTEMS_FOR_RG FILE_SYSTEMS +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:731] IFS=: +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:732] tr , '\n' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:732] print /usr/sap,/sapmnt,/oracle/EPP/sapdata4,/oracle/EPP/sapdata3,/oracle/EPP/sapdata2,/oracle/EPP/sapdata1,/oracle/EPP/origlogB,/oracle/EPP/origlogA,/oracle/EPP/oraarch,/oracle/EPP/mirrlogB,/oracle/EPP/mirrlogA,/oracle/EPP,/oracle,/board_org +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:732] sort -ru +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:732] LIST_OF_FILE_SYSTEMS_FOR_RG=$'/usr/sap\n/sapmnt\n/oracle/EPP/sapdata4\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata1\n/oracle/EPP/origlogB\n/oracle/EPP/origlogA\n/oracle/EPP/oraarch\n/oracle/EPP/mirrlogB\n/oracle/EPP/mirrlogA\n/oracle/EPP\n/oracle\n/board_org' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:733] find_nested_mounts $'/usr/sap\n/sapmnt\n/oracle/EPP/sapdata4\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata1\n/oracle/EPP/origlogB\n/oracle/EPP/origlogA\n/oracle/EPP/oraarch\n/oracle/EPP/mirrlogB\n/oracle/EPP/mirrlogA\n/oracle/EPP\n/oracle\n/board_org' +epprd_rg:cl_deactivate_fs[find_nested_mounts:88] given_fs_list=$'/usr/sap\n/sapmnt\n/oracle/EPP/sapdata4\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata1\n/oracle/EPP/origlogB\n/oracle/EPP/origlogA\n/oracle/EPP/oraarch\n/oracle/EPP/mirrlogB\n/oracle/EPP/mirrlogA\n/oracle/EPP\n/oracle\n/board_org' +epprd_rg:cl_deactivate_fs[find_nested_mounts:88] typeset given_fs_list +epprd_rg:cl_deactivate_fs[find_nested_mounts:90] typeset first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:91] mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:91] mount_out=$' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \n /dev/boardlv /board_org jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 
jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:91] typeset mount_out +epprd_rg:cl_deactivate_fs[find_nested_mounts:92] discovered_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:92] typeset discovered_fs +epprd_rg:cl_deactivate_fs[find_nested_mounts:93] typeset line fs nested_fs +epprd_rg:cl_deactivate_fs[find_nested_mounts:94] typeset mounted_fs_list +epprd_rg:cl_deactivate_fs[find_nested_mounts:96] fs_count=0 +epprd_rg:cl_deactivate_fs[find_nested_mounts:96] typeset -li fs_count +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \n /dev/boardlv /board_org jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /usr/sap +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=$' /dev/saplv /usr/sap jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- $' /dev/saplv /usr/sap jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 2' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 2 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] echo $' /dev/saplv /usr/sap jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/saplv /usr/sap jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /usr/sap == /usr/sap/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:128] [[ jfs2 == /usr/sap/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n '' ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print 'epdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /usr/sap/trans == /usr/sap/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /usr/sap/trans == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:128] [[ /usr/sap/trans == /usr/sap/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:128] [[ nfs3 == nfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:131] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:132] : exporting_node exported_file_system lower_mount_point vfs +epprd_rg:cl_deactivate_fs[find_nested_mounts:133] : epdev /usr/sap/trans /usr/sap/trans nfs3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:135] nested_fs=/usr/sap/trans +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /usr/sap/trans ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /usr/sap/trans +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \n /dev/boardlv /board_org jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/epplv 
/oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /sapmnt +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/sapmntlv /sapmnt jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/sapmntlv /sapmnt jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \n /dev/boardlv /board_org jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw' 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \n /dev/boardlv /board_org jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 
rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \n /dev/boardlv /board_org jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \n /dev/boardlv /board_org jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n 
/dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \n /dev/boardlv /board_org jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] 
print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \n /dev/boardlv /board_org jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \n /dev/boardlv /board_org jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 
17:10 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \n /dev/boardlv /board_org jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/mirrlogBlv 
/oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \n /dev/boardlv /board_org jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 
rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \n /dev/boardlv /board_org jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=$' /dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- $' /dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 10' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 10 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] echo $' /dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 
rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:128] [[ jfs2 == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n '' ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/mirrlogA == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
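
The repeated blocks above all follow one pattern: for each filesystem of the resource group, find_nested_mounts filters the saved mount table with grep -w for that mount point and counts the surviving lines. Because grep -w treats '/' as a word boundary, /oracle/EPP also matches lines for /oracle/EPP/sapdata1 and so on, so a count above one means other filesystems sit below this one and must be unmounted first. A minimal sketch of that test, reconstructed from the trace (mount_out and fs are assumed names; mounted_fs_list and fs_count appear in the trace):

    # Sketch: is anything mounted below $fs?
    # grep -w matches $fs as a whole word, so /oracle/EPP also hits
    # lines for /oracle/EPP/sapdata1, /oracle/EPP/oraarch, and so on.
    mounted_fs_list=$(print -- "$mount_out" | grep -w $fs)
    fs_count=$(print -- "$mounted_fs_list" | wc -l)
    if (( fs_count > 1 )) ; then
        :   # nested mounts exist; parse each line (see next note)
    fi
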
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/mirrlogAlv /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/mirrlogA ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/mirrlogB == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/mirrlogBlv /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/mirrlogB ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/oraarch == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/oraarchlv /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/oraarch ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/origlogA == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/origlogAlv /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/origlogA ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/origlogB == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/origlogBlv /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/origlogB ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata1 == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata1lv /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata1 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata2 == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata2lv /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata2 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata3 == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata3lv /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata3 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata4 == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
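
Each surviving line is then split into fields and classified, as the find_nested_mounts:118-143 trace lines show: for a local mount the second field is the mount point and the third the vfs type, while for an NFS mount the fields shift one to the right (node, remote path, mount point). A sketch consistent with the tests visible above (fs is an assumed name; line, nested_fs, and discovered_fs appear in the trace):

    # Sketch: pull a nested mount point out of one mount-table line.
    print "$line" | read first second third fourth rest
    nested_fs=''
    if [[ $second == $fs/* && $third == jfs* ]] ; then
        nested_fs=$second           # local jfs/jfs2 mount below $fs
    elif [[ $third == $fs/* ]] ; then
        nested_fs=$third            # NFS line: node remote_path mount_point
    fi
    if [[ -n $nested_fs ]] ; then
        discovered_fs="$discovered_fs $nested_fs"   # record it
    fi
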
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata4lv /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata4 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \n /dev/boardlv /board_org jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=$' /dev/oraclelv /oracle jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 
rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- $' /dev/oraclelv /oracle jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 11' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 11 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] echo $' /dev/oraclelv /oracle jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/oraclelv /oracle jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:128] [[ jfs2 == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n '' ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP == /oracle/* ]] 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/epplv /oracle/EPP +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/mirrlogA == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/mirrlogAlv /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/mirrlogA ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/mirrlogB == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/mirrlogBlv /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/mirrlogB ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/oraarch == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/oraarchlv /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/oraarch ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/origlogA == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/origlogAlv /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/origlogA ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/origlogB == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/origlogBlv /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/origlogB ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata1 == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata1lv /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata1 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata2 == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata2lv /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata2 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata3 == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
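
Note that discovered_fs now carries duplicates: every /oracle/EPP child was recorded once in the /oracle/EPP pass and is being recorded again in this /oracle pass. That is harmless, because the function finishes (find_nested_mounts:152, further down) by flattening the combined list through tr and sort -ru, which both de-duplicates and puts deeper paths ahead of their parents, the order unmounting requires. A sketch of that finishing step (rg_fs_list is an assumed name for the caller's list):

    # Sketch: unique list, children before parents.
    # Reverse lexicographic sort puts /oracle/EPP/sapdata4 before
    # /oracle/EPP before /oracle; -u drops the duplicate entries.
    print -- $rg_fs_list $discovered_fs | tr ' ' '\n' | sort -ru
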
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata3lv /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata3 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata4 == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata4lv /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata4 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \n /dev/boardlv /board_org jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv 
/oracle/EPP/mirrlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv\nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /board_org +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/boardlv /board_org jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/boardlv /board_org jfs2 Jan 28 17:10 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:150] : Pass comprehensive list to stdout, sorted to get correct unmount order +epprd_rg:cl_deactivate_fs[find_nested_mounts:152] print -- $'/usr/sap\n/sapmnt\n/oracle/EPP/sapdata4\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata1\n/oracle/EPP/origlogB\n/oracle/EPP/origlogA\n/oracle/EPP/oraarch\n/oracle/EPP/mirrlogB\n/oracle/EPP/mirrlogA\n/oracle/EPP\n/oracle\n/board_org' ' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[find_nested_mounts:152] tr ' ' '\n' +epprd_rg:cl_deactivate_fs[find_nested_mounts:152] sort -ru +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:733] LIST_OF_FILE_SYSTEMS_FOR_RG=$'/usr/sap/trans\n/usr/sap\n/sapmnt\n/oracle/EPP/sapdata4\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata1\n/oracle/EPP/origlogB\n/oracle/EPP/origlogA\n/oracle/EPP/oraarch\n/oracle/EPP/mirrlogB\n/oracle/EPP/mirrlogA\n/oracle/EPP\n/oracle\n/board_org' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:736] : Get the recovery method used for all filesystems in this resource group +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:738] print sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:738] read RECOVERY_METHOD RECOVERY_METHODS +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:738] IFS=: +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:739] print 
sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:739] cut -f 1 -d , +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:739] RECOVERY_METHOD=sequential +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:742] : verify the recovery method +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:744] RECOVERY_METHOD=sequential +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:745] RECOVERY_METHOD=sequential +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:747] [[ sequential != sequential ]] +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:754] : Tell the cluster manager what we are going to do +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:756] ALLFS=All_filesystems +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:757] cl_RMupdate resource_releasing All_filesystems cl_deactivate_fs 2023-01-28T18:00:11.872319 2023-01-28T18:00:11.876681 +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:760] : now that all variables are set, perform the umounts +epprd_rg:cl_deactivate_fs:/usr/sap/trans[deactivate_fs_process_resources:764] PS4_LOOP=/usr/sap/trans +epprd_rg:cl_deactivate_fs:/usr/sap/trans[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/usr/sap/trans[deactivate_fs_process_resources:770] fs_umount /usr/sap/trans cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.282)[fs_umount:313] FS=/usr/sap/trans +epprd_rg:cl_deactivate_fs(0.282)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(0.282)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(0.282)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(0.282)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.282)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(0.282)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(0.302)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(0.302)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(0.302)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(0.302)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(0.302)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(0.302)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(0.302)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(0.302)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(0.302)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(0.302)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(0.302)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(0.302)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(0.302)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(0.302)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(0.302)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(0.302)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(0.302)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(0.304)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(0.306)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/usr/sap/trans +epprd_rg:cl_deactivate_fs(0.310)[fs_umount:332] fs_type=nfs3 +epprd_rg:cl_deactivate_fs(0.310)[fs_umount:333] [[ nfs3 == nfs* ]] +epprd_rg:cl_deactivate_fs(0.310)[fs_umount:336] : unmount nfs filesystem +epprd_rg:cl_deactivate_fs(0.310)[fs_umount:338] umount /usr/sap/trans +epprd_rg:cl_deactivate_fs(0.316)[fs_umount:358] : append status to the status file +epprd_rg:cl_deactivate_fs(0.316)[fs_umount:360] print -- 0 /usr/sap/trans +epprd_rg:cl_deactivate_fs(0.316)[fs_umount:360] 1>> /tmp/epprd_rg_deactivate_fs.tmp 
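
The block above is the NFS fast path of fs_umount: the filesystem type is scraped from mount output, and anything NFS-mounted (here /usr/sap/trans, type nfs3) is unmounted directly, with no logical volume work, before the result is appended to the status file. A minimal ksh sketch of that path, assuming the stock AIX mount column layout, where NFS entries carry a leading node column so the mount point lands in $3 and the vfs type in $4:

    # Sketch only, not the PowerHA source. FS and TMP_FILENAME are the
    # fs_umount arguments seen in the trace above.
    fs_type=$(mount | awk '$3 == FILESYS && $4 ~ "^nfs." {print $4}' FILESYS="$FS")
    if [[ $fs_type == nfs* ]]
    then
        umount "$FS"
        rc=$?
        print -- "$rc $FS" >> "/tmp/$TMP_FILENAME"   # status record: "<rc> <mount point>"
        return $rc                                   # assumed: rc propagation is not shown in the trace
    fi
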
+epprd_rg:cl_deactivate_fs(0.316)[fs_umount:361] return 0 +epprd_rg:cl_deactivate_fs:/usr/sap[deactivate_fs_process_resources:764] PS4_LOOP=/usr/sap +epprd_rg:cl_deactivate_fs:/usr/sap[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/usr/sap[deactivate_fs_process_resources:770] fs_umount /usr/sap cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.317)[fs_umount:313] FS=/usr/sap +epprd_rg:cl_deactivate_fs(0.317)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(0.317)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(0.317)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(0.317)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.317)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(0.317)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(0.337)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(0.337)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(0.337)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(0.337)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(0.337)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(0.337)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(0.337)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(0.337)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(0.337)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(0.337)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(0.337)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(0.337)[fs_umount:324] typeset pidlist 
+epprd_rg:cl_deactivate_fs(0.337)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(0.337)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(0.337)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(0.337)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(0.337)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(0.338)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(0.341)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/usr/sap +epprd_rg:cl_deactivate_fs(0.345)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(0.345)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(0.345)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(0.345)[fs_umount:367] lsfs -c /usr/sap +epprd_rg:cl_deactivate_fs(0.348)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/usr/sap:/dev/saplv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(0.348)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(0.349)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/usr/sap:/dev/saplv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(0.351)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(0.353)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(0.353)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(0.353)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(0.353)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(0.353)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(0.354)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(0.354)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(0.356)[fs_umount:394] awk '{ if ( $1 == "/dev/saplv" ) print $2 }' +epprd_rg:cl_deactivate_fs(0.361)[fs_umount:394] FS_MOUNTED=/usr/sap +epprd_rg:cl_deactivate_fs(0.361)[fs_umount:395] [[ -n /usr/sap ]] +epprd_rg:cl_deactivate_fs(0.361)[fs_umount:397] [[ /usr/sap != /usr/sap ]] +epprd_rg:cl_deactivate_fs(0.361)[fs_umount:409] [[ /usr/sap == / ]] +epprd_rg:cl_deactivate_fs(0.361)[fs_umount:409] [[ /usr/sap == /usr ]] +epprd_rg:cl_deactivate_fs(0.361)[fs_umount:409] [[ /usr/sap == /dev ]] +epprd_rg:cl_deactivate_fs(0.361)[fs_umount:409] [[ /usr/sap == /proc ]] +epprd_rg:cl_deactivate_fs(0.361)[fs_umount:409] [[ /usr/sap == /var ]] +epprd_rg:cl_deactivate_fs(0.361)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/usr/sap' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:11.985683 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:11.985683|INFO: Deactivating Filesystem|/usr/sap' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(0.391)[fs_umount:427] : Try up to 60 times to unmount /usr/sap +epprd_rg:cl_deactivate_fs(0.391)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(0.391)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(0.391)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(0.393)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:00:11.000 +epprd_rg:cl_deactivate_fs(0.393)[fs_umount:434] umount /usr/sap 
+epprd_rg:cl_deactivate_fs(1.058)[fs_umount:437] : Unmount of /usr/sap worked. Can stop now. +epprd_rg:cl_deactivate_fs(1.058)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(1.058)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(1.058)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/usr/sap' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:12.682468 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:12.682468|INFO: Deactivating Filesystem|/usr/sap' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.087)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(1.087)[fs_umount:687] print -- 0 /dev/saplv /usr/sap +epprd_rg:cl_deactivate_fs(1.087)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.087)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/sapmnt[deactivate_fs_process_resources:764] PS4_LOOP=/sapmnt +epprd_rg:cl_deactivate_fs:/sapmnt[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/sapmnt[deactivate_fs_process_resources:770] fs_umount /sapmnt cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.087)[fs_umount:313] FS=/sapmnt +epprd_rg:cl_deactivate_fs(1.088)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(1.088)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(1.088)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(1.088)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.088)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(1.088)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(1.108)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(1.108)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(1.108)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(1.108)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(1.108)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(1.108)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(1.108)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(1.108)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(1.108)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(1.108)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(1.108)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(1.108)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(1.108)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(1.108)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(1.108)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(1.108)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(1.108)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(1.110)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/sapmnt +epprd_rg:cl_deactivate_fs(1.110)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(1.114)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(1.114)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(1.114)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(1.114)[fs_umount:367] lsfs -c /sapmnt +epprd_rg:cl_deactivate_fs(1.118)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/sapmnt:/dev/sapmntlv:jfs2:::20971520:rw:no:no' +epprd_rg:cl_deactivate_fs(1.118)[fs_umount:382] : Get the 
logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(1.119)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/sapmnt:/dev/sapmntlv:jfs2:::20971520:rw:no:no' +epprd_rg:cl_deactivate_fs(1.120)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(1.119)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(1.120)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(1.122)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(1.122)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(1.122)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(1.123)[fs_umount:394] awk '{ if ( $1 == "/dev/sapmntlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(1.123)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(1.124)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(1.128)[fs_umount:394] FS_MOUNTED=/sapmnt +epprd_rg:cl_deactivate_fs(1.128)[fs_umount:395] [[ -n /sapmnt ]] +epprd_rg:cl_deactivate_fs(1.128)[fs_umount:397] [[ /sapmnt != /sapmnt ]] +epprd_rg:cl_deactivate_fs(1.128)[fs_umount:409] [[ /sapmnt == / ]] +epprd_rg:cl_deactivate_fs(1.128)[fs_umount:409] [[ /sapmnt == /usr ]] +epprd_rg:cl_deactivate_fs(1.128)[fs_umount:409] [[ /sapmnt == /dev ]] +epprd_rg:cl_deactivate_fs(1.128)[fs_umount:409] [[ /sapmnt == /proc ]] +epprd_rg:cl_deactivate_fs(1.128)[fs_umount:409] [[ /sapmnt == /var ]] +epprd_rg:cl_deactivate_fs(1.128)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/sapmnt' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:12.751723 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:12.751723|INFO: Deactivating Filesystem|/sapmnt' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.157)[fs_umount:427] : Try up to 60 times to unmount /sapmnt +epprd_rg:cl_deactivate_fs(1.157)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(1.157)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(1.157)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(1.159)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:00:12.000 +epprd_rg:cl_deactivate_fs(1.159)[fs_umount:434] umount /sapmnt +epprd_rg:cl_deactivate_fs(1.381)[fs_umount:437] : Unmount of /sapmnt worked. Can stop now. 
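
Local filesystems such as /usr/sap and /sapmnt instead go through a bounded retry: up to 60 timestamped unmount attempts, stopping at the first one that succeeds (each succeeds on attempt 1 here, since the resource group releases them sequentially). A sketch of the loop shape; the pacing between failed attempts is an assumption, since this trace never shows a retry, and any process cleanup the real script may do between attempts is omitted:

    typeset -i count
    for (( count = 1; count <= 60; count++ ))
    do
        if umount "$FS"
        then
            break          # unmount worked, stop retrying
        fi
        sleep 1            # assumed pacing; not visible in this trace
    done
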
+epprd_rg:cl_deactivate_fs(1.381)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(1.381)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(1.381)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/sapmnt' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:13.006025 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:13.006025|INFO: Deactivating Filesystem|/sapmnt' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.411)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(1.411)[fs_umount:687] print -- 0 /dev/sapmntlv /sapmnt +epprd_rg:cl_deactivate_fs(1.411)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.411)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata4[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata4[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata4[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/sapdata4 cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.411)[fs_umount:313] FS=/oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(1.411)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(1.411)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(1.411)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(1.411)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.411)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(1.411)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(1.432)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(1.432)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(1.432)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(1.432)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(1.432)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(1.432)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(1.432)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(1.432)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(1.432)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(1.432)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(1.432)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(1.432)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(1.432)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(1.432)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(1.432)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(1.432)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(1.432)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(1.434)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(1.434)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(1.438)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(1.438)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(1.438)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(1.438)[fs_umount:367] lsfs -c /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(1.441)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata4:/dev/sapdata4lv:jfs2:::104857600:rw:no:no' 
+epprd_rg:cl_deactivate_fs(1.441)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(1.442)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata4:/dev/sapdata4lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(1.444)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(1.443)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(1.444)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(1.445)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(1.445)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(1.446)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(1.447)[fs_umount:394] awk '{ if ( $1 == "/dev/sapdata4lv" ) print $2 }' +epprd_rg:cl_deactivate_fs(1.447)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(1.447)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(1.452)[fs_umount:394] FS_MOUNTED=/oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(1.452)[fs_umount:395] [[ -n /oracle/EPP/sapdata4 ]] +epprd_rg:cl_deactivate_fs(1.452)[fs_umount:397] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:cl_deactivate_fs(1.452)[fs_umount:409] [[ /oracle/EPP/sapdata4 == / ]] +epprd_rg:cl_deactivate_fs(1.452)[fs_umount:409] [[ /oracle/EPP/sapdata4 == /usr ]] +epprd_rg:cl_deactivate_fs(1.452)[fs_umount:409] [[ /oracle/EPP/sapdata4 == /dev ]] +epprd_rg:cl_deactivate_fs(1.452)[fs_umount:409] [[ /oracle/EPP/sapdata4 == /proc ]] +epprd_rg:cl_deactivate_fs(1.452)[fs_umount:409] [[ /oracle/EPP/sapdata4 == /var ]] +epprd_rg:cl_deactivate_fs(1.452)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:13.075687 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:13.075687|INFO: Deactivating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.481)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(1.481)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(1.481)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(1.481)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(1.483)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:00:13.000 +epprd_rg:cl_deactivate_fs(1.483)[fs_umount:434] umount /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(1.555)[fs_umount:437] : Unmount of /oracle/EPP/sapdata4 worked. Can stop now. 
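
For each local filesystem, the backing logical volume comes from lsfs -c, which prints a colon-delimited header followed by one data line, as in the lv_lsfs value captured above for /oracle/EPP/sapdata4. The parse is just tail plus a field split; in isolation:

    # lsfs -c layout: #MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct
    # tail -1 drops the header; ksh runs the final pipeline stage in the
    # current shell, so $lv (device) and $fs_type (vfs) persist afterwards.
    lsfs -c "$FS" | tail -1 | IFS=: read skip lv fs_type rest
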
+epprd_rg:cl_deactivate_fs(1.555)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(1.555)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(1.555)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:13.179956 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:13.179956|INFO: Deactivating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.585)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(1.585)[fs_umount:687] print -- 0 /dev/sapdata4lv /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(1.585)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.585)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata3[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata3[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata3[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/sapdata3 cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.585)[fs_umount:313] FS=/oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(1.585)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(1.585)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(1.585)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(1.585)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.585)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(1.585)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(1.606)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(1.606)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(1.606)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(1.606)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(1.606)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(1.606)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(1.606)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(1.606)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(1.606)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(1.606)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(1.606)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(1.606)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(1.606)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(1.606)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(1.606)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(1.606)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(1.606)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(1.608)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(1.608)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(1.612)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(1.612)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(1.612)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(1.612)[fs_umount:367] lsfs -c /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(1.615)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata3:/dev/sapdata3lv:jfs2:::104857600:rw:no:no' 
+epprd_rg:cl_deactivate_fs(1.615)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(1.616)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata3:/dev/sapdata3lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(1.617)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(1.618)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(1.618)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(1.619)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(1.619)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(1.619)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(1.621)[fs_umount:394] awk '{ if ( $1 == "/dev/sapdata3lv" ) print $2 }' +epprd_rg:cl_deactivate_fs(1.621)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(1.621)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(1.625)[fs_umount:394] FS_MOUNTED=/oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(1.625)[fs_umount:395] [[ -n /oracle/EPP/sapdata3 ]] +epprd_rg:cl_deactivate_fs(1.625)[fs_umount:397] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:cl_deactivate_fs(1.625)[fs_umount:409] [[ /oracle/EPP/sapdata3 == / ]] +epprd_rg:cl_deactivate_fs(1.625)[fs_umount:409] [[ /oracle/EPP/sapdata3 == /usr ]] +epprd_rg:cl_deactivate_fs(1.625)[fs_umount:409] [[ /oracle/EPP/sapdata3 == /dev ]] +epprd_rg:cl_deactivate_fs(1.625)[fs_umount:409] [[ /oracle/EPP/sapdata3 == /proc ]] +epprd_rg:cl_deactivate_fs(1.625)[fs_umount:409] [[ /oracle/EPP/sapdata3 == /var ]] +epprd_rg:cl_deactivate_fs(1.626)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:13.249333 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:13.249333|INFO: Deactivating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.654)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(1.654)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(1.654)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(1.654)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(1.657)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:00:13.000 +epprd_rg:cl_deactivate_fs(1.657)[fs_umount:434] umount /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(1.729)[fs_umount:437] : Unmount of /oracle/EPP/sapdata3 worked. Can stop now. 
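
Each unmount is bracketed by two amlog_trace calls, one before the umount and one after it succeeds, each writing a timestamped record to /var/hacmp/availability/clavailability.log; the pair of timestamps is what makes per-filesystem deactivation timings recoverable from that log. The record format, sketched:

    # One record per call, e.g.:
    # |2023-01-28T18:00:13.354256|INFO: Deactivating Filesystem|/oracle/EPP/sapdata3
    DATE=$(cltime)       # PowerHA timestamp helper, as used at amlog_trace:319
    echo "|$DATE|INFO: Deactivating Filesystem|$FS" >> /var/hacmp/availability/clavailability.log
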
+epprd_rg:cl_deactivate_fs(1.729)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(1.730)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(1.730)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:13.354256 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:13.354256|INFO: Deactivating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.759)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(1.759)[fs_umount:687] print -- 0 /dev/sapdata3lv /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(1.759)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.759)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata2[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata2[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata2[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/sapdata2 cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.759)[fs_umount:313] FS=/oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(1.759)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(1.759)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(1.759)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(1.759)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.759)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(1.759)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(1.780)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(1.780)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(1.780)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(1.780)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(1.780)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(1.780)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(1.780)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(1.780)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(1.780)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(1.780)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(1.780)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(1.780)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(1.780)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(1.780)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(1.780)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(1.780)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(1.780)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(1.782)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(1.782)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(1.787)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(1.787)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(1.787)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(1.787)[fs_umount:367] lsfs -c /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(1.790)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata2:/dev/sapdata2lv:jfs2:::104857600:rw:no:no' 
+epprd_rg:cl_deactivate_fs(1.790)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(1.791)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata2:/dev/sapdata2lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(1.792)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(1.792)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(1.793)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(1.794)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(1.794)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(1.794)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(1.796)[fs_umount:394] awk '{ if ( $1 == "/dev/sapdata2lv" ) print $2 }' +epprd_rg:cl_deactivate_fs(1.796)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(1.796)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(1.800)[fs_umount:394] FS_MOUNTED=/oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(1.800)[fs_umount:395] [[ -n /oracle/EPP/sapdata2 ]] +epprd_rg:cl_deactivate_fs(1.800)[fs_umount:397] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:cl_deactivate_fs(1.800)[fs_umount:409] [[ /oracle/EPP/sapdata2 == / ]] +epprd_rg:cl_deactivate_fs(1.800)[fs_umount:409] [[ /oracle/EPP/sapdata2 == /usr ]] +epprd_rg:cl_deactivate_fs(1.800)[fs_umount:409] [[ /oracle/EPP/sapdata2 == /dev ]] +epprd_rg:cl_deactivate_fs(1.800)[fs_umount:409] [[ /oracle/EPP/sapdata2 == /proc ]] +epprd_rg:cl_deactivate_fs(1.800)[fs_umount:409] [[ /oracle/EPP/sapdata2 == /var ]] +epprd_rg:cl_deactivate_fs(1.800)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:13.424123 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:13.424123|INFO: Deactivating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.829)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(1.829)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(1.829)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(1.829)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(1.832)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:00:13.000 +epprd_rg:cl_deactivate_fs(1.832)[fs_umount:434] umount /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(1.903)[fs_umount:437] : Unmount of /oracle/EPP/sapdata2 worked. Can stop now. 
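
Every successful unmount also appends a record to /tmp/epprd_rg_deactivate_fs.tmp, "rc device mountpoint" for local filesystems and "rc mountpoint" for the NFS case, so the caller can tell after the loop which unmounts failed. A hypothetical consumer of that file, as a sketch only; the real check lives inside cl_deactivate_fs and may differ:

    # Assumption: any record with a nonzero rc marks a failed unmount.
    while read rc rest
    do
        (( rc != 0 )) && print -u2 "unmount failed: $rest"
    done < /tmp/epprd_rg_deactivate_fs.tmp
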
+epprd_rg:cl_deactivate_fs(1.904)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(1.904)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(1.904)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:13.528195 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:13.528195|INFO: Deactivating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.933)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(1.933)[fs_umount:687] print -- 0 /dev/sapdata2lv /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(1.933)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.933)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata1[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata1[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata1[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/sapdata1 cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.933)[fs_umount:313] FS=/oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(1.933)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(1.933)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(1.933)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(1.933)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.933)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(1.933)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(1.954)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(1.954)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(1.955)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(1.955)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(1.955)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(1.955)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(1.955)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(1.955)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(1.955)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(1.955)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(1.955)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(1.955)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(1.955)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(1.955)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(1.955)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(1.955)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(1.955)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(1.956)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(1.956)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(1.961)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(1.961)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(1.961)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(1.961)[fs_umount:367] lsfs -c /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(1.964)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata1:/dev/sapdata1lv:jfs2:::104857600:rw:no:no' 
+epprd_rg:cl_deactivate_fs(1.964)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(1.965)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata1:/dev/sapdata1lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(1.966)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(1.967)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(1.967)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(1.968)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(1.968)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(1.968)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(1.970)[fs_umount:394] awk '{ if ( $1 == "/dev/sapdata1lv" ) print $2 }' +epprd_rg:cl_deactivate_fs(1.970)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(1.970)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(1.974)[fs_umount:394] FS_MOUNTED=/oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(1.974)[fs_umount:395] [[ -n /oracle/EPP/sapdata1 ]] +epprd_rg:cl_deactivate_fs(1.974)[fs_umount:397] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:cl_deactivate_fs(1.974)[fs_umount:409] [[ /oracle/EPP/sapdata1 == / ]] +epprd_rg:cl_deactivate_fs(1.974)[fs_umount:409] [[ /oracle/EPP/sapdata1 == /usr ]] +epprd_rg:cl_deactivate_fs(1.974)[fs_umount:409] [[ /oracle/EPP/sapdata1 == /dev ]] +epprd_rg:cl_deactivate_fs(1.974)[fs_umount:409] [[ /oracle/EPP/sapdata1 == /proc ]] +epprd_rg:cl_deactivate_fs(1.974)[fs_umount:409] [[ /oracle/EPP/sapdata1 == /var ]] +epprd_rg:cl_deactivate_fs(1.974)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:13.597460 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:13.597460|INFO: Deactivating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(2.005)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:00:13.000 +epprd_rg:cl_deactivate_fs(2.005)[fs_umount:434] umount /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(2.075)[fs_umount:437] : Unmount of /oracle/EPP/sapdata1 worked. Can stop now. 
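
Before each unmount, fs_umount:409 compares the target against the base OS mount points / /usr /dev /proc /var; all five tests fail here because only the resource group's own filesystems are being released. The guard, sketched as a case statement; what the real script does on a match is not visible in this trace, so skipping the unmount is an assumption:

    case "$FS" in
        / | /usr | /dev | /proc | /var )
            return 0 ;;   # assumed: never unmount a base OS filesystem
    esac
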
+epprd_rg:cl_deactivate_fs(2.075)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(2.076)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(2.076)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:13.699550 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:13.699550|INFO: Deactivating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(2.104)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(2.104)[fs_umount:687] print -- 0 /dev/sapdata1lv /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(2.104)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.104)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogB[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogB[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogB[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/origlogB cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.105)[fs_umount:313] FS=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(2.105)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(2.105)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(2.105)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(2.105)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.105)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(2.105)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(2.125)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(2.125)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(2.125)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(2.125)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(2.125)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(2.125)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(2.125)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(2.125)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(2.125)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(2.125)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(2.125)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(2.125)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(2.125)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(2.125)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(2.125)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(2.125)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(2.125)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(2.127)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(2.127)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(2.131)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(2.131)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(2.131)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(2.131)[fs_umount:367] lsfs -c /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(2.134)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogB:/dev/origlogBlv:jfs2:::10485760:rw:no:no' 
+epprd_rg:cl_deactivate_fs(2.134)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(2.135)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogB:/dev/origlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(2.136)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(2.137)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(2.137)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(2.138)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(2.138)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(2.138)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(2.140)[fs_umount:394] awk '{ if ( $1 == "/dev/origlogBlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(2.140)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(2.140)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(2.144)[fs_umount:394] FS_MOUNTED=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(2.144)[fs_umount:395] [[ -n /oracle/EPP/origlogB ]] +epprd_rg:cl_deactivate_fs(2.144)[fs_umount:397] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:cl_deactivate_fs(2.144)[fs_umount:409] [[ /oracle/EPP/origlogB == / ]] +epprd_rg:cl_deactivate_fs(2.144)[fs_umount:409] [[ /oracle/EPP/origlogB == /usr ]] +epprd_rg:cl_deactivate_fs(2.144)[fs_umount:409] [[ /oracle/EPP/origlogB == /dev ]] +epprd_rg:cl_deactivate_fs(2.144)[fs_umount:409] [[ /oracle/EPP/origlogB == /proc ]] +epprd_rg:cl_deactivate_fs(2.144)[fs_umount:409] [[ /oracle/EPP/origlogB == /var ]] +epprd_rg:cl_deactivate_fs(2.144)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:13.767937 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:13.767937|INFO: Deactivating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(2.173)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(2.173)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(2.173)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(2.173)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(2.176)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:00:13.000 +epprd_rg:cl_deactivate_fs(2.176)[fs_umount:434] umount /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(2.245)[fs_umount:437] : Unmount of /oracle/EPP/origlogB worked. Can stop now. 
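
The sequence above is the heart of fs_umount (script lines 427-439): a retry loop that attempts the unmount up to 60 times and breaks out on the first success; here /oracle/EPP/origlogB came free on attempt 1. A minimal ksh sketch of the traced logic — the pacing between failed attempts is not visible in this excerpt, so the sleep is an assumption:

    # Try up to 60 times to unmount $FS; stop on the first success.
    for (( count=1 ; count <= 60 ; count++ ))
    do
        : Attempt $count of 60 to unmount at $(date '+%h %d %H:%M:%S.000')
        if umount $FS
        then
            : Unmount of $FS worked. Can stop now.
            break
        fi
        sleep 1   # assumed back-off; every attempt in this log succeeds immediately
    done
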
+epprd_rg:cl_deactivate_fs(2.245)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(2.246)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(2.246)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:13.869621 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:13.869621|INFO: Deactivating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(2.274)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(2.274)[fs_umount:687] print -- 0 /dev/origlogBlv /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(2.274)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.275)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogA[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogA[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogA[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/origlogA cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.275)[fs_umount:313] FS=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(2.275)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(2.275)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(2.275)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(2.275)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.275)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(2.275)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(2.295)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(2.295)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(2.295)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(2.295)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(2.295)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(2.295)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(2.295)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(2.295)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(2.295)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(2.295)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(2.295)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(2.295)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(2.295)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(2.295)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(2.295)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(2.295)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(2.295)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(2.297)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(2.297)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(2.301)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(2.301)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(2.301)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(2.301)[fs_umount:367] lsfs -c /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(2.304)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogA:/dev/origlogAlv:jfs2:::10485760:rw:no:no' 
+epprd_rg:cl_deactivate_fs(2.304)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(2.305)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogA:/dev/origlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(2.306)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(2.307)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(2.307)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(2.308)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(2.308)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(2.308)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(2.310)[fs_umount:394] awk '{ if ( $1 == "/dev/origlogAlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(2.310)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(2.310)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(2.314)[fs_umount:394] FS_MOUNTED=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(2.314)[fs_umount:395] [[ -n /oracle/EPP/origlogA ]] +epprd_rg:cl_deactivate_fs(2.314)[fs_umount:397] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:cl_deactivate_fs(2.314)[fs_umount:409] [[ /oracle/EPP/origlogA == / ]] +epprd_rg:cl_deactivate_fs(2.315)[fs_umount:409] [[ /oracle/EPP/origlogA == /usr ]] +epprd_rg:cl_deactivate_fs(2.315)[fs_umount:409] [[ /oracle/EPP/origlogA == /dev ]] +epprd_rg:cl_deactivate_fs(2.315)[fs_umount:409] [[ /oracle/EPP/origlogA == /proc ]] +epprd_rg:cl_deactivate_fs(2.315)[fs_umount:409] [[ /oracle/EPP/origlogA == /var ]] +epprd_rg:cl_deactivate_fs(2.315)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:13.938250 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:13.938250|INFO: Deactivating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(2.343)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(2.343)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(2.343)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(2.343)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(2.346)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:00:13.000 +epprd_rg:cl_deactivate_fs(2.346)[fs_umount:434] umount /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(2.416)[fs_umount:437] : Unmount of /oracle/EPP/origlogA worked. Can stop now. 
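
Each filesystem's pass begins the same way: lsfs -c resolves the owning logical volume (script lines 367-384). The colon-separated record is reduced to its last line and split on ':' so that the Device and Vfs fields land in lv and fs_type; for /oracle/EPP/origlogA that yields /dev/origlogAlv and jfs2. The parse, using the names from the trace (in ksh the final stage of a pipeline runs in the current shell, so read can set the variables):

    # lsfs -c emits a header plus one record per filesystem:
    #   #MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct
    lv_lsfs=$(lsfs -c "$FS")
    print -- "$lv_lsfs" | tail -1 | IFS=: read skip lv fs_type rest
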
+epprd_rg:cl_deactivate_fs(2.416)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(2.416)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(2.416)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:14.039986 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:14.039986|INFO: Deactivating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(2.445)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(2.445)[fs_umount:687] print -- 0 /dev/origlogAlv /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(2.445)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.445)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/oraarch[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs:/oracle/EPP/oraarch[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/oraarch[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/oraarch cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.445)[fs_umount:313] FS=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(2.445)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(2.445)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(2.445)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(2.445)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.445)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(2.445)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(2.466)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(2.466)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(2.466)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(2.466)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(2.466)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(2.466)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(2.466)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(2.466)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(2.466)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(2.466)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(2.466)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(2.466)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(2.466)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(2.466)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(2.466)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(2.466)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(2.466)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(2.468)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(2.468)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(2.472)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(2.472)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(2.472)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(2.472)[fs_umount:367] lsfs -c /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(2.475)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/oraarch:/dev/oraarchlv:jfs2:::104857600:rw:no:no' 
+epprd_rg:cl_deactivate_fs(2.475)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(2.476)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/oraarch:/dev/oraarchlv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(2.477)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(2.478)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(2.478)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(2.479)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(2.479)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(2.479)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(2.481)[fs_umount:394] awk '{ if ( $1 == "/dev/oraarchlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(2.481)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(2.481)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(2.485)[fs_umount:394] FS_MOUNTED=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(2.485)[fs_umount:395] [[ -n /oracle/EPP/oraarch ]] +epprd_rg:cl_deactivate_fs(2.485)[fs_umount:397] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:cl_deactivate_fs(2.485)[fs_umount:409] [[ /oracle/EPP/oraarch == / ]] +epprd_rg:cl_deactivate_fs(2.485)[fs_umount:409] [[ /oracle/EPP/oraarch == /usr ]] +epprd_rg:cl_deactivate_fs(2.485)[fs_umount:409] [[ /oracle/EPP/oraarch == /dev ]] +epprd_rg:cl_deactivate_fs(2.485)[fs_umount:409] [[ /oracle/EPP/oraarch == /proc ]] +epprd_rg:cl_deactivate_fs(2.485)[fs_umount:409] [[ /oracle/EPP/oraarch == /var ]] +epprd_rg:cl_deactivate_fs(2.485)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:14.108880 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:14.108880|INFO: Deactivating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(2.514)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(2.514)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(2.514)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(2.514)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(2.517)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:00:14.000 +epprd_rg:cl_deactivate_fs(2.517)[fs_umount:434] umount /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(2.587)[fs_umount:437] : Unmount of /oracle/EPP/oraarch worked. Can stop now. 
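
Every unmount is bracketed by two amlog_trace calls (script lines 425 and 676), so /var/hacmp/availability/clavailability.log receives a timestamped 'Deactivating Filesystem' record on each side of the umount; the pair gives the elapsed time per filesystem. Reconstructed from the trace, the helper is three steps (its first argument is empty in every call seen here):

    # amlog_trace '' 'message': append one timestamped availability record.
    function amlog_trace
    {
        clcycle clavailability.log > /dev/null 2>&1    # rotate the log if needed
        DATE=$(cltime)                                 # e.g. 2023-01-28T18:00:14.108880
        echo "|$DATE|INFO: $2" >> /var/hacmp/availability/clavailability.log
    }
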
+epprd_rg:cl_deactivate_fs(2.587)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(2.587)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(2.587)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:14.211239 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:14.211239|INFO: Deactivating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(2.616)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(2.616)[fs_umount:687] print -- 0 /dev/oraarchlv /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(2.616)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.616)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogB[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogB[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogB[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/mirrlogB cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.616)[fs_umount:313] FS=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(2.616)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(2.616)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(2.616)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(2.616)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.616)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(2.616)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(2.637)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(2.637)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(2.637)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(2.637)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(2.637)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(2.637)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(2.637)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(2.637)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(2.637)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(2.637)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(2.637)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(2.637)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(2.637)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(2.637)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(2.637)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(2.637)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(2.637)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(2.639)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(2.639)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(2.643)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(2.643)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(2.643)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(2.643)[fs_umount:367] lsfs -c /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(2.646)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogB:/dev/mirrlogBlv:jfs2:::10485760:rw:no:no' 
+epprd_rg:cl_deactivate_fs(2.646)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(2.647)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogB:/dev/mirrlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(2.648)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(2.649)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(2.649)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(2.650)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(2.650)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(2.650)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(2.652)[fs_umount:394] awk '{ if ( $1 == "/dev/mirrlogBlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(2.652)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(2.652)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(2.656)[fs_umount:394] FS_MOUNTED=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(2.656)[fs_umount:395] [[ -n /oracle/EPP/mirrlogB ]] +epprd_rg:cl_deactivate_fs(2.656)[fs_umount:397] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:cl_deactivate_fs(2.656)[fs_umount:409] [[ /oracle/EPP/mirrlogB == / ]] +epprd_rg:cl_deactivate_fs(2.656)[fs_umount:409] [[ /oracle/EPP/mirrlogB == /usr ]] +epprd_rg:cl_deactivate_fs(2.656)[fs_umount:409] [[ /oracle/EPP/mirrlogB == /dev ]] +epprd_rg:cl_deactivate_fs(2.656)[fs_umount:409] [[ /oracle/EPP/mirrlogB == /proc ]] +epprd_rg:cl_deactivate_fs(2.656)[fs_umount:409] [[ /oracle/EPP/mirrlogB == /var ]] +epprd_rg:cl_deactivate_fs(2.657)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:14.279998 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:14.279998|INFO: Deactivating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(2.685)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(2.685)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(2.685)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(2.685)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(2.688)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:00:14.000 +epprd_rg:cl_deactivate_fs(2.688)[fs_umount:434] umount /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(2.757)[fs_umount:437] : Unmount of /oracle/EPP/mirrlogB worked. Can stop now. 
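
Before any umount, fs_umount verifies that the logical volume really is mounted, and where (script line 394), then refuses to act on critical system mount points (line 409). Condensed from the trace; the error paths are not shown in this excerpt, so the return code below is an assumption:

    # Where, if anywhere, is the logical volume mounted?
    FS_MOUNTED=$(LC_ALL=C mount | awk '$1 == LV { print $2 }' LV="$lv")
    if [[ -n $FS_MOUNTED ]]
    then
        # Never unmount a system filesystem, whatever the configuration says.
        for sysfs in / /usr /dev /proc /var
        do
            [[ $FS_MOUNTED == $sysfs ]] && return 1   # assumed failure path
        done
    fi
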
+epprd_rg:cl_deactivate_fs(2.757)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(2.757)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(2.757)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:14.381437 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:14.381437|INFO: Deactivating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(2.786)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(2.786)[fs_umount:687] print -- 0 /dev/mirrlogBlv /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(2.786)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.786)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogA[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogA[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogA[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/mirrlogA cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.786)[fs_umount:313] FS=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(2.786)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(2.786)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(2.786)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(2.787)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.787)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(2.787)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(2.807)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(2.807)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(2.807)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(2.807)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(2.807)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(2.807)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(2.807)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(2.807)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(2.807)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(2.807)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(2.807)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(2.807)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(2.807)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(2.807)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(2.807)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(2.807)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(2.807)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(2.809)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(2.809)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(2.813)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(2.813)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(2.813)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(2.813)[fs_umount:367] lsfs -c /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(2.816)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogA:/dev/mirrlogAlv:jfs2:::10485760:rw:no:no' 
+epprd_rg:cl_deactivate_fs(2.816)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(2.817)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogA:/dev/mirrlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(2.818)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(2.819)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(2.819)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(2.820)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(2.820)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(2.820)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(2.822)[fs_umount:394] awk '{ if ( $1 == "/dev/mirrlogAlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(2.822)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(2.822)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(2.826)[fs_umount:394] FS_MOUNTED=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(2.826)[fs_umount:395] [[ -n /oracle/EPP/mirrlogA ]] +epprd_rg:cl_deactivate_fs(2.826)[fs_umount:397] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:cl_deactivate_fs(2.826)[fs_umount:409] [[ /oracle/EPP/mirrlogA == / ]] +epprd_rg:cl_deactivate_fs(2.826)[fs_umount:409] [[ /oracle/EPP/mirrlogA == /usr ]] +epprd_rg:cl_deactivate_fs(2.826)[fs_umount:409] [[ /oracle/EPP/mirrlogA == /dev ]] +epprd_rg:cl_deactivate_fs(2.826)[fs_umount:409] [[ /oracle/EPP/mirrlogA == /proc ]] +epprd_rg:cl_deactivate_fs(2.826)[fs_umount:409] [[ /oracle/EPP/mirrlogA == /var ]] +epprd_rg:cl_deactivate_fs(2.826)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:14.449939 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:14.449939|INFO: Deactivating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(2.855)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(2.855)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(2.855)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(2.855)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(2.858)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:00:14.000 +epprd_rg:cl_deactivate_fs(2.858)[fs_umount:434] umount /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(2.928)[fs_umount:437] : Unmount of /oracle/EPP/mirrlogA worked. Can stop now. 
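
The clwparroot preamble repeated before every filesystem answers one question: does epprd_rg live in a WPAR? loadWparName asks the ODM for a WPAR_NAME resource, gets nothing back, and so WPAR_ROOT stays empty and every path is used unmodified. In outline (the /wpars prefix for the configured case is an assumption; this log never takes that branch):

    # Empty output means no WPAR is configured for the resource group.
    wparName=$(clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource)
    if [[ -z $wparName ]]
    then
        WPAR_ROOT=''                   # the case throughout this log
    else
        WPAR_ROOT=/wpars/$wparName     # assumed AIX WPAR root layout
    fi
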
+epprd_rg:cl_deactivate_fs(2.928)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(2.928)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(2.928)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:14.551858 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:14.551858|INFO: Deactivating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(2.957)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(2.957)[fs_umount:687] print -- 0 /dev/mirrlogAlv /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(2.957)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.957)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP +epprd_rg:cl_deactivate_fs:/oracle/EPP[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP[deactivate_fs_process_resources:770] fs_umount /oracle/EPP cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.957)[fs_umount:313] FS=/oracle/EPP +epprd_rg:cl_deactivate_fs(2.957)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(2.957)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(2.957)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(2.957)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.957)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(2.957)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(2.977)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(2.977)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(2.977)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(2.977)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(2.977)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(2.977)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(2.977)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(2.977)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(2.977)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(2.977)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(2.977)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(2.977)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(2.977)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(2.977)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(2.977)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(2.977)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(2.977)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(2.979)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP +epprd_rg:cl_deactivate_fs(2.979)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(2.983)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(2.983)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(2.983)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(2.984)[fs_umount:367] lsfs -c /oracle/EPP +epprd_rg:cl_deactivate_fs(2.987)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP:/dev/epplv:jfs2:::62914560:rw:no:no' +epprd_rg:cl_deactivate_fs(2.987)[fs_umount:382] : 
Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(2.988)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP:/dev/epplv:jfs2:::62914560:rw:no:no' +epprd_rg:cl_deactivate_fs(2.989)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(2.989)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(2.989)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(2.991)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(2.991)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(2.991)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(2.992)[fs_umount:394] awk '{ if ( $1 == "/dev/epplv" ) print $2 }' +epprd_rg:cl_deactivate_fs(2.992)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(2.993)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(2.997)[fs_umount:394] FS_MOUNTED=/oracle/EPP +epprd_rg:cl_deactivate_fs(2.997)[fs_umount:395] [[ -n /oracle/EPP ]] +epprd_rg:cl_deactivate_fs(2.997)[fs_umount:397] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:cl_deactivate_fs(2.997)[fs_umount:409] [[ /oracle/EPP == / ]] +epprd_rg:cl_deactivate_fs(2.997)[fs_umount:409] [[ /oracle/EPP == /usr ]] +epprd_rg:cl_deactivate_fs(2.997)[fs_umount:409] [[ /oracle/EPP == /dev ]] +epprd_rg:cl_deactivate_fs(2.997)[fs_umount:409] [[ /oracle/EPP == /proc ]] +epprd_rg:cl_deactivate_fs(2.997)[fs_umount:409] [[ /oracle/EPP == /var ]] +epprd_rg:cl_deactivate_fs(2.997)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:14.620414 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:14.620414|INFO: Deactivating Filesystem|/oracle/EPP' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(3.025)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP +epprd_rg:cl_deactivate_fs(3.025)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(3.025)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(3.025)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(3.028)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:00:14.000 +epprd_rg:cl_deactivate_fs(3.028)[fs_umount:434] umount /oracle/EPP +epprd_rg:cl_deactivate_fs(3.435)[fs_umount:437] : Unmount of /oracle/EPP worked. Can stop now. 
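
The first test in every pass (script line 332) is an NFS short-circuit: scan the mount table for this mount point with a vfs field starting 'nfs', and only when that comes back empty — as it does for every filesystem here — fall through to the local-JFS2 path. The scan as traced; the NFS branch body is not shown in this excerpt, so its direct unmount is an assumption:

    # AIX mount output: node mounted mounted-over vfs date options.
    # Remote mounts carry the node in $1, so mounted-over is $3 and vfs is $4.
    fs_type=$(mount | awk '$3 == FILESYS && $4 ~ "^nfs." { print $4 }' FILESYS="$FS")
    if [[ $fs_type == nfs* ]]
    then
        umount "$FS"    # assumed: NFS unmounts directly, no LV lookup needed
        return $?
    fi
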
+epprd_rg:cl_deactivate_fs(3.435)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(3.435)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(3.435)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:15.059397 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:15.059397|INFO: Deactivating Filesystem|/oracle/EPP' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(3.464)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(3.464)[fs_umount:687] print -- 0 /dev/epplv /oracle/EPP +epprd_rg:cl_deactivate_fs(3.464)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(3.464)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle[deactivate_fs_process_resources:764] PS4_LOOP=/oracle +epprd_rg:cl_deactivate_fs:/oracle[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle[deactivate_fs_process_resources:770] fs_umount /oracle cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(3.464)[fs_umount:313] FS=/oracle +epprd_rg:cl_deactivate_fs(3.464)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(3.464)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(3.464)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(3.464)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(3.464)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(3.464)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(3.485)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(3.485)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(3.485)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(3.485)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(3.485)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(3.485)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(3.485)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(3.485)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(3.485)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(3.485)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(3.485)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(3.485)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(3.485)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(3.485)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(3.485)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(3.485)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(3.485)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(3.487)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle +epprd_rg:cl_deactivate_fs(3.487)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(3.491)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(3.491)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(3.491)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(3.491)[fs_umount:367] lsfs -c /oracle +epprd_rg:cl_deactivate_fs(3.494)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle:/dev/oraclelv:jfs2:::41943040:rw:no:no' +epprd_rg:cl_deactivate_fs(3.494)[fs_umount:382] : Get the 
logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(3.495)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle:/dev/oraclelv:jfs2:::41943040:rw:no:no' +epprd_rg:cl_deactivate_fs(3.496)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(3.497)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(3.497)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(3.498)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(3.498)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(3.498)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(3.500)[fs_umount:394] awk '{ if ( $1 == "/dev/oraclelv" ) print $2 }' +epprd_rg:cl_deactivate_fs(3.500)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(3.500)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(3.504)[fs_umount:394] FS_MOUNTED=/oracle +epprd_rg:cl_deactivate_fs(3.504)[fs_umount:395] [[ -n /oracle ]] +epprd_rg:cl_deactivate_fs(3.504)[fs_umount:397] [[ /oracle != /oracle ]] +epprd_rg:cl_deactivate_fs(3.504)[fs_umount:409] [[ /oracle == / ]] +epprd_rg:cl_deactivate_fs(3.504)[fs_umount:409] [[ /oracle == /usr ]] +epprd_rg:cl_deactivate_fs(3.504)[fs_umount:409] [[ /oracle == /dev ]] +epprd_rg:cl_deactivate_fs(3.504)[fs_umount:409] [[ /oracle == /proc ]] +epprd_rg:cl_deactivate_fs(3.504)[fs_umount:409] [[ /oracle == /var ]] +epprd_rg:cl_deactivate_fs(3.504)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:15.127817 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:15.127817|INFO: Deactivating Filesystem|/oracle' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(3.533)[fs_umount:427] : Try up to 60 times to unmount /oracle +epprd_rg:cl_deactivate_fs(3.533)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(3.533)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(3.533)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(3.535)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:00:15.000 +epprd_rg:cl_deactivate_fs(3.536)[fs_umount:434] umount /oracle +epprd_rg:cl_deactivate_fs(3.609)[fs_umount:437] : Unmount of /oracle worked. Can stop now. 
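
Each pass ends by appending one record to /tmp/epprd_rg_deactivate_fs.tmp (script line 687): a status digit, the device, and the mount point. When the loop is over, the caller greps that file to decide the fate of the whole event (deactivate_fs_process_resources, lines 796-817). Writer and reader in sketch form; the meaning of status 11 is inferred only from the second grep:

    # Writer, once per filesystem (0 = clean unmount):
    print -- 0 "$lv" "$FS" >> /tmp/epprd_rg_deactivate_fs.tmp

    # Reader, after all filesystems have been processed:
    STATUS=0
    if grep -qw '^1' /tmp/epprd_rg_deactivate_fs.tmp
    then
        STATUS=1        # at least one filesystem failed to unmount
    elif grep -qw '^11' /tmp/epprd_rg_deactivate_fs.tmp
    then
        STATUS=11       # assumed: a softer, recoverable failure class
    fi
    (( STATUS == 0 )) && rm -f /tmp/epprd_rg_deactivate_fs.tmp
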
+epprd_rg:cl_deactivate_fs(3.609)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(3.609)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(3.609)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:15.233560 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:15.233560|INFO: Deactivating Filesystem|/oracle' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(3.638)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(3.638)[fs_umount:687] print -- 0 /dev/oraclelv /oracle +epprd_rg:cl_deactivate_fs(3.638)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(3.638)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/board_org[deactivate_fs_process_resources:764] PS4_LOOP=/board_org +epprd_rg:cl_deactivate_fs:/board_org[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/board_org[deactivate_fs_process_resources:770] fs_umount /board_org cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(3.639)[fs_umount:313] FS=/board_org +epprd_rg:cl_deactivate_fs(3.639)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(3.639)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(3.639)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(3.639)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(3.639)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(3.639)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(3.659)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(3.659)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(3.659)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(3.659)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(3.659)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(3.659)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(3.659)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(3.659)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(3.659)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(3.659)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(3.659)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(3.659)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(3.659)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(3.659)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(3.659)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(3.659)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(3.659)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(3.661)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/board_org +epprd_rg:cl_deactivate_fs(3.661)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(3.665)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(3.665)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(3.665)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(3.665)[fs_umount:367] lsfs -c /board_org +epprd_rg:cl_deactivate_fs(3.668)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(3.669)[fs_umount:382] : 
Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(3.670)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(3.671)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(3.671)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(3.671)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(3.672)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(3.673)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(3.673)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(3.674)[fs_umount:394] awk '{ if ( $1 == "/dev/boardlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(3.674)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(3.674)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(3.679)[fs_umount:394] FS_MOUNTED=/board_org +epprd_rg:cl_deactivate_fs(3.679)[fs_umount:395] [[ -n /board_org ]] +epprd_rg:cl_deactivate_fs(3.679)[fs_umount:397] [[ /board_org != /board_org ]] +epprd_rg:cl_deactivate_fs(3.679)[fs_umount:409] [[ /board_org == / ]] +epprd_rg:cl_deactivate_fs(3.679)[fs_umount:409] [[ /board_org == /usr ]] +epprd_rg:cl_deactivate_fs(3.679)[fs_umount:409] [[ /board_org == /dev ]] +epprd_rg:cl_deactivate_fs(3.679)[fs_umount:409] [[ /board_org == /proc ]] +epprd_rg:cl_deactivate_fs(3.679)[fs_umount:409] [[ /board_org == /var ]] +epprd_rg:cl_deactivate_fs(3.679)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/board_org' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:15.302315 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:15.302315|INFO: Deactivating Filesystem|/board_org' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(3.707)[fs_umount:427] : Try up to 60 times to unmount /board_org +epprd_rg:cl_deactivate_fs(3.707)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(3.707)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(3.707)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(3.710)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:00:15.000 +epprd_rg:cl_deactivate_fs(3.710)[fs_umount:434] umount /board_org +epprd_rg:cl_deactivate_fs(3.780)[fs_umount:437] : Unmount of /board_org worked. Can stop now. 
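The unmount of /board_org above succeeded on the first pass, so the trace never shows the retry side of the loop. A minimal ksh sketch of the bounded-retry pattern fs_umount follows (the mount point is parameterized here, and the pause/recovery step between attempts is an assumption; only the success path appears in this trace):

FS=${1:-/board_org}
typeset -i count
for (( count=1; count<=60; count++ ))
do
    : Attempt $count of 60 to unmount at $(date '+%h %d %H:%M:%S.000')
    if umount $FS
    then
        : Unmount of $FS worked. Can stop now.
        break
    fi
    sleep 2    # assumed back-off; the real script may also chase processes holding the filesystem
done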
+epprd_rg:cl_deactivate_fs(3.780)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(3.780)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(3.780)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/board_org' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:00:15.403937 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:00:15.403937|INFO: Deactivating Filesystem|/board_org' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(3.809)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(3.809)[fs_umount:687] print -- 0 /dev/boardlv /board_org +epprd_rg:cl_deactivate_fs(3.809)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(3.809)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/board_org[deactivate_fs_process_resources:773] unset PS4_LOOP +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:777] [[ -n '' ]] +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:786] ALLNOERROR=All_non_error_filesystems +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:788] : update resource manager +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:790] cl_RMupdate resource_down All_non_error_filesystems cl_deactivate_fs 2023-01-28T18:00:15.426997 2023-01-28T18:00:15.431462 +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:794] : Check to see how the unmounts went +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:796] [[ -s /tmp/epprd_rg_deactivate_fs.tmp ]] +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:798] grep -qw ^1 /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:805] grep -qw ^11 /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:814] : All unmounts successful +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:816] STATUS=0 +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:817] rm -f /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:821] return 0 +epprd_rg:cl_deactivate_fs[924] exit 0 +epprd_rg:process_resources[process_file_systems:2668] RC=0 +epprd_rg:process_resources[process_file_systems:2669] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[process_file_systems:2681] (( 0 != 0 )) +epprd_rg:process_resources[process_file_systems:2687] return 0 +epprd_rg:process_resources[3483] RC=0 +epprd_rg:process_resources[3485] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3487] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:00:15.453718 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=VGS ACTION=RELEASE VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' EXPORT_FILESYSTEM='"TRUE"' +epprd_rg:process_resources[1] JOB_TYPE=VGS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] VOLUME_GROUPS=datavg 
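Each fs_umount invocation reports through a shared status file rather than a return-value chain: one "<rc> <device> <mount point>" line is appended per filesystem, and deactivate_fs_process_resources greps the file afterwards (the -w word match keeps ^1 from also matching 11 entries). A sketch under the conventions visible above; what status 11 ultimately triggers is an assumption:

TMP_FILENAME=/tmp/epprd_rg_deactivate_fs.tmp
print -- 0 /dev/boardlv /board_org >> $TMP_FILENAME   # 0 == unmount succeeded
# later, in deactivate_fs_process_resources:
if grep -qw ^1 $TMP_FILENAME        # any outright failure?
then
    STATUS=1
elif grep -qw ^11 $TMP_FILENAME     # entries flagged 11 are checked separately
then
    STATUS=11                       # assumption: propagated as a distinct exit status
else
    STATUS=0                        # all unmounts successful
fi
rm -f $TMP_FILENAME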
+epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] EXPORT_FILESYSTEM=TRUE +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ VGS == RELEASE ]] +epprd_rg:process_resources[3360] [[ VGS == ONLINE ]] +epprd_rg:process_resources[3571] process_volume_groups_main RELEASE +epprd_rg:process_resources[process_volume_groups_main:2293] PS4_FUNC=process_volume_groups_main +epprd_rg:process_resources[process_volume_groups_main:2293] typeset PS4_FUNC +epprd_rg:process_resources[process_volume_groups_main:2294] [[ high == high ]] +epprd_rg:process_resources[process_volume_groups_main:2294] set -x +epprd_rg:process_resources[process_volume_groups_main:2295] DEF_VARYON_ACTION=0 +epprd_rg:process_resources[process_volume_groups_main:2295] typeset -li DEF_VARYON_ACTION +epprd_rg:process_resources[process_volume_groups_main:2296] FAILURE_IN_METHOD=0 +epprd_rg:process_resources[process_volume_groups_main:2296] typeset -li FAILURE_IN_METHOD +epprd_rg:process_resources[process_volume_groups_main:2297] ACTION=RELEASE +epprd_rg:process_resources[process_volume_groups_main:2297] typeset ACTION +epprd_rg:process_resources[process_volume_groups_main:2298] STAT=0 +epprd_rg:process_resources[process_volume_groups_main:2299] VG_LIST=datavg +epprd_rg:process_resources[process_volume_groups_main:2300] RG_LIST=epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2304] getReplicatedResources epprd_rg +epprd_rg:process_resources[getReplicatedResources:699] PS4_FUNC=getReplicatedResources +epprd_rg:process_resources[getReplicatedResources:699] typeset PS4_FUNC +epprd_rg:process_resources[getReplicatedResources:700] [[ high == high ]] +epprd_rg:process_resources[getReplicatedResources:700] set -x +epprd_rg:process_resources[getReplicatedResources:702] RV=false +epprd_rg:process_resources[getReplicatedResources:704] clodmget -n -f type HACMPrresmethods +epprd_rg:process_resources[getReplicatedResources:704] [[ -n 9 ]] +epprd_rg:process_resources[getReplicatedResources:707] : Replicated resource methods are defined, check for resources +epprd_rg:process_resources[getReplicatedResources:709] clodmget -q $'name like \'*_REP_RESOURCE\' AND group=epprd_rg' -f value -n HACMPresource +epprd_rg:process_resources[getReplicatedResources:709] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:718] : Verify if any backup profiles are configured and trigger cbm utilities based on that +epprd_rg:process_resources[getReplicatedResources:720] clodmget -q name=BACKUP_ENABLED -f value HACMPresource +epprd_rg:process_resources[getReplicatedResources:720] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:739] echo false +epprd_rg:process_resources[process_volume_groups_main:2304] REPLICATED_RESOURCES=false +epprd_rg:process_resources[process_volume_groups_main:2305] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2306] print -- datavg +epprd_rg:process_resources[process_volume_groups_main:2306] read VOLUME_GROUPS VG_LIST +epprd_rg:process_resources[process_volume_groups_main:2306] IFS=: +epprd_rg:process_resources[process_volume_groups_main:2307] VOLUME_GROUPS=datavg 
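The eval'd JOB_TYPE/ACTION assignments above are the heart of process_resources: clRGPA prints shell assignments describing the next job, and the caller evals them inside set -a so they land in the environment of everything it spawns. A condensed sketch of that dispatch loop, keeping only the job types seen in this trace; the terminating arm is an assumption:

while true
do
    set -a                  # auto-export whatever the eval assigns
    eval $(clRGPA)          # e.g. JOB_TYPE=VGS ACTION=RELEASE VOLUME_GROUPS="datavg"
    RC=$?
    set +a
    (( RC != 0 )) && break
    case $JOB_TYPE in
        VGS)            process_volume_groups_main $ACTION ;;
        SERVICE_LABELS) release_service_labels ;;   # ACTION=RELEASE in this trace
        *)              break ;;                    # assumed end-of-work job type
    esac
done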
+epprd_rg:process_resources[process_volume_groups_main:2310] : At this point, these variables contain information only for epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2312] export VOLUME_GROUPS +epprd_rg:process_resources[process_volume_groups_main:2313] export RESOURCE_GROUPS +epprd_rg:process_resources[process_volume_groups_main:2315] [[ false == true ]] +epprd_rg:process_resources[process_volume_groups_main:2555] process_volume_groups RELEASE +epprd_rg:process_resources[process_volume_groups:2571] PS4_FUNC=process_volume_groups +epprd_rg:process_resources[process_volume_groups:2571] typeset PS4_FUNC +epprd_rg:process_resources[process_volume_groups:2572] [[ high == high ]] +epprd_rg:process_resources[process_volume_groups:2572] set -x +epprd_rg:process_resources[process_volume_groups:2573] STAT=0 +epprd_rg:process_resources[process_volume_groups:2575] GROUPNAME=epprd_rg +epprd_rg:process_resources[process_volume_groups:2575] export GROUPNAME +epprd_rg:process_resources[process_volume_groups:2578] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[process_volume_groups:2603] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[process_volume_groups:2605] cl_deactivate_vgs -n +epprd_rg:cl_deactivate_vgs[458] version=%I% +epprd_rg:cl_deactivate_vgs[461] STATUS=0 +epprd_rg:cl_deactivate_vgs[461] typeset -li STATUS +epprd_rg:cl_deactivate_vgs[462] TMP_VARYOFF_STATUS=/tmp/_deactivate_vgs.tmp +epprd_rg:cl_deactivate_vgs[463] sddsrv_off=FALSE +epprd_rg:cl_deactivate_vgs[464] ALLVGS=All_volume_groups +epprd_rg:cl_deactivate_vgs[465] OEM_CALL=false +epprd_rg:cl_deactivate_vgs[467] (( 1 != 0 )) +epprd_rg:cl_deactivate_vgs[467] [[ -n == -c ]] +epprd_rg:cl_deactivate_vgs[476] EVENT_TYPE=RELEASE_PRIMARY +epprd_rg:cl_deactivate_vgs[477] EVENT_TYPE=RELEASE_PRIMARY +epprd_rg:cl_deactivate_vgs[480] : if JOB_TYPE is set and is not $'\'GROUP\',' then process_resources is parent +epprd_rg:cl_deactivate_vgs[482] [[ VGS != 0 ]] +epprd_rg:cl_deactivate_vgs[482] [[ VGS != GROUP ]] +epprd_rg:cl_deactivate_vgs[485] : parameters passed from process_resources thru environment +epprd_rg:cl_deactivate_vgs[487] PROC_RES=true +epprd_rg:cl_deactivate_vgs[501] : set -u will report an error if any variable used in the script is not set +epprd_rg:cl_deactivate_vgs[503] set -u +epprd_rg:cl_deactivate_vgs[506] : Remove the status file if it currently exists +epprd_rg:cl_deactivate_vgs[508] rm -f /tmp/_deactivate_vgs.tmp +epprd_rg:cl_deactivate_vgs[511] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_deactivate_vgs[512] : to allow reliable comparisons. 
E.g., maximum VRMF is +epprd_rg:cl_deactivate_vgs[513] : 99.99.999.999 +epprd_rg:cl_deactivate_vgs[515] typeset -li V R M F +epprd_rg:cl_deactivate_vgs[516] typeset -Z2 R +epprd_rg:cl_deactivate_vgs[517] typeset -Z3 M +epprd_rg:cl_deactivate_vgs[518] typeset -Z3 F +epprd_rg:cl_deactivate_vgs[519] VRMF=0 +epprd_rg:cl_deactivate_vgs[519] typeset -li VRMF +epprd_rg:cl_deactivate_vgs[528] ls '/dev/vpath*' +epprd_rg:cl_deactivate_vgs[528] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_vgs[595] : Special processing for 2-node NFS clusters +epprd_rg:cl_deactivate_vgs[597] TWO_NODE_CLUSTER=FALSE +epprd_rg:cl_deactivate_vgs[597] export TWO_NODE_CLUSTER +epprd_rg:cl_deactivate_vgs[598] FS_TYPES='jsf2?log' +epprd_rg:cl_deactivate_vgs[598] export FS_TYPES +epprd_rg:cl_deactivate_vgs[599] wc -l +epprd_rg:cl_deactivate_vgs[599] clodmget -q 'object = VERBOSE_LOGGING' -f name -n HACMPnode +epprd_rg:cl_deactivate_vgs[599] (( 2 == 2 )) +epprd_rg:cl_deactivate_vgs[600] [[ -n TRUE ]] +epprd_rg:cl_deactivate_vgs[602] : two nodes, with exported filesystems +epprd_rg:cl_deactivate_vgs[603] TWO_NODE_CLUSTER=TRUE +epprd_rg:cl_deactivate_vgs[603] export TWO_NODE_CLUSTER +epprd_rg:cl_deactivate_vgs[607] : Pick up a list of currently varyd on volume groups +epprd_rg:cl_deactivate_vgs[609] lsvg -L -o +epprd_rg:cl_deactivate_vgs[609] 2> /tmp/lsvg.err +epprd_rg:cl_deactivate_vgs[609] VG_ON_LIST=$'datavg\ncaavg_private\nrootvg' +epprd_rg:cl_deactivate_vgs[612] : if not called from process_resources, use old-style environment and parameters +epprd_rg:cl_deactivate_vgs[614] [[ true == false ]] +epprd_rg:cl_deactivate_vgs[672] : Called from process_resources +epprd_rg:cl_deactivate_vgs[674] LIST_OF_VOLUME_GROUPS_FOR_RG='' +epprd_rg:cl_deactivate_vgs[679] export GROUPNAME +epprd_rg:cl_deactivate_vgs[681] : Discover the volume groups for this resource group. +epprd_rg:cl_deactivate_vgs[686] echo datavg +epprd_rg:cl_deactivate_vgs[686] read LIST_OF_VOLUME_GROUPS_FOR_RG VOLUME_GROUPS +epprd_rg:cl_deactivate_vgs[686] IFS=: +epprd_rg:cl_deactivate_vgs[689] : Reverse the order, so that VGs release in reverse order of acquisition +epprd_rg:cl_deactivate_vgs[693] sed 's/ /,/g' +epprd_rg:cl_deactivate_vgs[693] echo datavg +epprd_rg:cl_deactivate_vgs[693] LIST_OF_COMMASEP_VG_FOR_RG=datavg +epprd_rg:cl_deactivate_vgs[694] echo datavg +epprd_rg:cl_deactivate_vgs[695] tr , '\n' +epprd_rg:cl_deactivate_vgs[695] egrep -v -w $'rootvg|caavg_private\n |altinst_rootvg|old_rootvg' +epprd_rg:cl_deactivate_vgs[696] sort -ru +epprd_rg:cl_deactivate_vgs[694] LIST_OF_VOLUME_GROUPS_FOR_RG=datavg +epprd_rg:cl_deactivate_vgs[698] : Update Resource Manager - releasing VGs for this RG +epprd_rg:cl_deactivate_vgs[700] cl_RMupdate resource_releasing All_volume_groups cl_deactivate_vgs 2023-01-28T18:00:15.542294 2023-01-28T18:00:15.546779 +epprd_rg:cl_deactivate_vgs[703] : Process the volume groups for this resource group +epprd_rg:cl_deactivate_vgs:datavg[707] PS4_LOOP=datavg +epprd_rg:cl_deactivate_vgs:datavg[711] print datavg caavg_private rootvg +epprd_rg:cl_deactivate_vgs:datavg[711] grep -qw datavg +epprd_rg:cl_deactivate_vgs:datavg[719] : Thie VG is varied on, so go vary it off. 
Get the VG mode first +epprd_rg:cl_deactivate_vgs:datavg[721] MODE=9999 +epprd_rg:cl_deactivate_vgs:datavg[722] /usr/sbin/getlvodm -v datavg +epprd_rg:cl_deactivate_vgs:datavg[722] VGID=00c44af100004b00000001851e9dc053 +epprd_rg:cl_deactivate_vgs:datavg[723] lqueryvg -g 00c44af100004b00000001851e9dc053 -X +epprd_rg:cl_deactivate_vgs:datavg[723] MODE=32 +epprd_rg:cl_deactivate_vgs:datavg[724] RC=0 +epprd_rg:cl_deactivate_vgs:datavg[725] (( 0 != 0 )) +epprd_rg:cl_deactivate_vgs:datavg[726] : exit status of lqueryvg -g 00c44af100004b00000001851e9dc053 -X: 0 +epprd_rg:cl_deactivate_vgs:datavg[728] vgs_varyoff datavg 32 +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:60] PS4_TIMER=true +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:60] typeset PS4_TIMER +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:61] [[ high == high ]] +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:61] set -x +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:63] VG=datavg +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:63] typeset VG +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:64] MODE=32 +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:64] typeset MODE +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:66] OPEN_FSs='' +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:66] typeset OPEN_FSs +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:67] OPEN_LVs='' +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:67] typeset OPEN_LVs +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:68] typeset TMP_VG_LIST +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:69] TS_FLAGS='' +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:69] typeset TS_FLAGS +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:71] STATUS=0 +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:71] typeset -li STATUS +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:72] RC=0 +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:72] typeset -li RC +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:73] SELECTIVE_FAILOVER=false +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:73] typeset SELECTIVE_FAILOVER +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:74] typeset LV +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:75] lv_list='' +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:75] typeset lv_list +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:76] typeset FS +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:77] FS_MOUNTED='' +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:77] typeset FS_MOUNTED +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:79] rc_fuser=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:79] typeset -li rc_fuser +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:80] rc_varyonvg=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:80] typeset -li rc_varyonvg +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:81] rc_varyoffvg=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:81] typeset -li rc_varyoffvg +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:82] rc_lsvg=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:82] typeset -li rc_lsvg +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:83] rc_dfs=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:83] typeset -li rc_dfs +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:84] rc_dvg=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:84] typeset -li rc_dvg +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:88] typeset -li FV FR FM FF 
+epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:89] typeset -Z2 FR +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:90] typeset -Z3 FM +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:91] typeset -Z3 FF +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:92] FVRMF=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:92] typeset -li FVRMF +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:93] fuser_lvl=601004000 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:93] typeset -li fuser_lvl +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:95] lsvg -l -L datavg +epprd_rg:cl_deactivate_vgs(0.095):datavg[vgs_varyoff:95] 2> /dev/null +epprd_rg:cl_deactivate_vgs(0.115):datavg[vgs_varyoff:95] TMP_VG_LIST=$'datavg:\nLV NAME TYPE LPs PPs PVs LV STATE MOUNT POINT\nepprdaloglv jfs2log 1 1 1 closed/syncd N/A\nsaplv jfs2 100 100 7 closed/syncd /usr/sap\nsapmntlv jfs2 20 20 7 closed/syncd /sapmnt\noraclelv jfs2 40 40 7 closed/syncd /oracle\nepplv jfs2 60 60 7 closed/syncd /oracle/EPP\noraarchlv jfs2 100 100 7 closed/syncd /oracle/EPP/oraarch\nsapdata1lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata1\nsapdata2lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata2\nsapdata3lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata3\nsapdata4lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata4\nboardlv jfs2 10 10 7 closed/syncd /board_org\noriglogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogA\noriglogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogB\nmirrlogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogA\nmirrlogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_vgs(0.115):datavg[vgs_varyoff:96] rc_lsvg=0 +epprd_rg:cl_deactivate_vgs(0.115):datavg[vgs_varyoff:98] [[ RELEASE_PRIMARY == reconfig* ]] +epprd_rg:cl_deactivate_vgs(0.115):datavg[vgs_varyoff:114] [[ -n $'datavg:\nLV NAME TYPE LPs PPs PVs LV STATE MOUNT POINT\nepprdaloglv jfs2log 1 1 1 closed/syncd N/A\nsaplv jfs2 100 100 7 closed/syncd /usr/sap\nsapmntlv jfs2 20 20 7 closed/syncd /sapmnt\noraclelv jfs2 40 40 7 closed/syncd /oracle\nepplv jfs2 60 60 7 closed/syncd /oracle/EPP\noraarchlv jfs2 100 100 7 closed/syncd /oracle/EPP/oraarch\nsapdata1lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata1\nsapdata2lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata2\nsapdata3lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata3\nsapdata4lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata4\nboardlv jfs2 10 10 7 closed/syncd /board_org\noriglogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogA\noriglogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogB\nmirrlogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogA\nmirrlogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogB' ]] +epprd_rg:cl_deactivate_vgs(0.115):datavg[vgs_varyoff:117] : Get list of open logical volumes corresponding to filesystems +epprd_rg:cl_deactivate_vgs(0.116):datavg[vgs_varyoff:119] awk '$2 ~ /jfs2?$/ && $6 ~ /open/ {print $1}' +epprd_rg:cl_deactivate_vgs(0.116):datavg[vgs_varyoff:119] print $'datavg:\nLV NAME TYPE LPs PPs PVs LV STATE MOUNT POINT\nepprdaloglv jfs2log 1 1 1 closed/syncd N/A\nsaplv jfs2 100 100 7 closed/syncd /usr/sap\nsapmntlv jfs2 20 20 7 closed/syncd /sapmnt\noraclelv jfs2 40 40 7 closed/syncd /oracle\nepplv jfs2 60 60 7 closed/syncd /oracle/EPP\noraarchlv jfs2 100 100 7 closed/syncd /oracle/EPP/oraarch\nsapdata1lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata1\nsapdata2lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata2\nsapdata3lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata3\nsapdata4lv jfs2 100 100 7 closed/syncd 
/oracle/EPP/sapdata4\nboardlv jfs2 10 10 7 closed/syncd /board_org\noriglogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogA\noriglogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogB\nmirrlogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogA\nmirrlogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_vgs(0.120):datavg[vgs_varyoff:119] OPEN_LVs='' +epprd_rg:cl_deactivate_vgs(0.120):datavg[vgs_varyoff:122] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_vgs(0.120):datavg[vgs_varyoff:140] [[ -n '' ]] +epprd_rg:cl_deactivate_vgs(0.120):datavg[vgs_varyoff:167] [[ TRUE == TRUE ]] +epprd_rg:cl_deactivate_vgs(0.120):datavg[vgs_varyoff:170] : For two-node clusters, special processing for the highly available NFS +epprd_rg:cl_deactivate_vgs(0.120):datavg[vgs_varyoff:171] : server function: tell NFS to dump the dup cache into the jfslog or jfs2log +epprd_rg:cl_deactivate_vgs(0.120):datavg[vgs_varyoff:175] : Find the first log device in the saved list of logical volumes +epprd_rg:cl_deactivate_vgs(0.120):datavg[vgs_varyoff:177] pattern='jsf2?log' +epprd_rg:cl_deactivate_vgs(0.122):datavg[vgs_varyoff:178] awk '$2 ~ /jsf2?log/ {printf "/dev/%s\n", $1 ; exit}' +epprd_rg:cl_deactivate_vgs(0.122):datavg[vgs_varyoff:178] print $'datavg:\nLV NAME TYPE LPs PPs PVs LV STATE MOUNT POINT\nepprdaloglv jfs2log 1 1 1 closed/syncd N/A\nsaplv jfs2 100 100 7 closed/syncd /usr/sap\nsapmntlv jfs2 20 20 7 closed/syncd /sapmnt\noraclelv jfs2 40 40 7 closed/syncd /oracle\nepplv jfs2 60 60 7 closed/syncd /oracle/EPP\noraarchlv jfs2 100 100 7 closed/syncd /oracle/EPP/oraarch\nsapdata1lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata1\nsapdata2lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata2\nsapdata3lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata3\nsapdata4lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata4\nboardlv jfs2 10 10 7 closed/syncd /board_org\noriglogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogA\noriglogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogB\nmirrlogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogA\nmirrlogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_vgs(0.126):datavg[vgs_varyoff:178] logdev='' +epprd_rg:cl_deactivate_vgs(0.126):datavg[vgs_varyoff:180] [[ -z '' ]] +epprd_rg:cl_deactivate_vgs(0.126):datavg[vgs_varyoff:181] [[ true == true ]] +epprd_rg:cl_deactivate_vgs(0.126):datavg[vgs_varyoff:182] [[ ONLINE != ONLINE ]] +epprd_rg:cl_deactivate_vgs(0.126):datavg[vgs_varyoff:216] [[ -n '' ]] +epprd_rg:cl_deactivate_vgs(0.126):datavg[vgs_varyoff:223] : Finally, vary off the volume group +epprd_rg:cl_deactivate_vgs(0.126):datavg[vgs_varyoff:226] amlog_trace '' 'Deactivating Volume Group|datavg' +epprd_rg:cl_deactivate_vgs(0.126):datavg[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_vgs(0.127):datavg[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_vgs(0.151):datavg[amlog_trace:319] cltime +epprd_rg:cl_deactivate_vgs(0.154):datavg[amlog_trace:319] DATE=2023-01-28T18:00:15.633975 +epprd_rg:cl_deactivate_vgs(0.154):datavg[amlog_trace:320] echo '|2023-01-28T18:00:15.633975|INFO: Deactivating Volume Group|datavg' +epprd_rg:cl_deactivate_vgs(0.154):datavg[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_vgs(0.154):datavg[vgs_varyoff:228] [[ 32 == 32 ]] +epprd_rg:cl_deactivate_vgs(0.154):datavg[vgs_varyoff:231] : This VG is ECM. Move to passive mode. 
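The varyoff path keys off the volume group mode: getlvodm maps the VG name to its VGID, and lqueryvg -X reports the mode, with 32 treated as enhanced concurrent per the comment in the trace. A sketch of that probe, keeping the script's 9999 sentinel:

VG=datavg
MODE=9999                            # sentinel: mode unknown
VGID=$(/usr/sbin/getlvodm -v $VG)    # VG name -> VGID
MODE=$(lqueryvg -g $VGID -X)         # 32 == enhanced concurrent here
RC=$?
: exit status of lqueryvg -g $VGID -X: $RC
(( RC != 0 )) && MODE=9999           # fall back to the sentinel on failure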
+epprd_rg:cl_deactivate_vgs(0.154):datavg[vgs_varyoff:244] TS_FLAGS=-o +epprd_rg:cl_deactivate_vgs(0.154):datavg[vgs_varyoff:245] cltime 2023-01-28T18:00:15.636656 +epprd_rg:cl_deactivate_vgs(0.157):datavg[vgs_varyoff:246] varyonvg -c -n -P datavg +epprd_rg:cl_deactivate_vgs(0.157):datavg[vgs_varyoff:246] 2> /dev/null +epprd_rg:cl_deactivate_vgs(0.291):datavg[vgs_varyoff:247] rc_varyonvg=0 +epprd_rg:cl_deactivate_vgs(0.291):datavg[vgs_varyoff:248] : return code from varyonvg -c -n -P datavg is 0 +epprd_rg:cl_deactivate_vgs(0.291):datavg[vgs_varyoff:249] cltime 2023-01-28T18:00:15.774180 +epprd_rg:cl_deactivate_vgs(0.294):datavg[vgs_varyoff:250] (( 0 != 0 )) +epprd_rg:cl_deactivate_vgs(0.294):datavg[vgs_varyoff:277] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_vgs(0.294):datavg[vgs_varyoff:281] amlog_trace '' 'Deactivating Volume Group|datavg' +epprd_rg:cl_deactivate_vgs(0.294):datavg[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_vgs(0.295):datavg[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_vgs(0.320):datavg[amlog_trace:319] cltime +epprd_rg:cl_deactivate_vgs(0.322):datavg[amlog_trace:319] DATE=2023-01-28T18:00:15.802424 +epprd_rg:cl_deactivate_vgs(0.322):datavg[amlog_trace:320] echo '|2023-01-28T18:00:15.802424|INFO: Deactivating Volume Group|datavg' +epprd_rg:cl_deactivate_vgs(0.322):datavg[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_vgs(0.322):datavg[vgs_varyoff:284] RC=0 +epprd_rg:cl_deactivate_vgs(0.322):datavg[vgs_varyoff:287] : Update LVM volume group timestamps in ODM +epprd_rg:cl_deactivate_vgs(0.322):datavg[vgs_varyoff:289] cl_update_vg_odm_ts -o datavg +epprd_rg:cl_update_vg_odm_ts(0.001)[77] version=1.13 +epprd_rg:cl_update_vg_odm_ts(0.001)[121] o_flag='' +epprd_rg:cl_update_vg_odm_ts(0.001)[122] f_flag='' +epprd_rg:cl_update_vg_odm_ts(0.001)[123] getopts :of option +epprd_rg:cl_update_vg_odm_ts(0.001)[126] : Local timestamps should be good, since volume group was +epprd_rg:cl_update_vg_odm_ts(0.001)[127] : just varyied on or off +epprd_rg:cl_update_vg_odm_ts(0.001)[128] o_flag=TRUE +epprd_rg:cl_update_vg_odm_ts(0.001)[123] getopts :of option +epprd_rg:cl_update_vg_odm_ts(0.001)[142] shift 1 +epprd_rg:cl_update_vg_odm_ts(0.001)[144] vg_name=datavg +epprd_rg:cl_update_vg_odm_ts(0.001)[145] [[ -z datavg ]] +epprd_rg:cl_update_vg_odm_ts(0.001)[151] shift +epprd_rg:cl_update_vg_odm_ts(0.001)[152] node_list='' +epprd_rg:cl_update_vg_odm_ts(0.001)[153] /usr/es/sbin/cluster/utilities/cl_get_path all +epprd_rg:cl_update_vg_odm_ts(0.004)[153] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin +epprd_rg:cl_update_vg_odm_ts(0.004)[155] [[ -z '' ]] +epprd_rg:cl_update_vg_odm_ts(0.004)[158] : Check to see if this update is necessary - some LVM levels automatically +epprd_rg:cl_update_vg_odm_ts(0.004)[159] : update volume group timestamps clusterwide. 
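For an ECM volume group, release does not varyoffvg at all: varyonvg -c -n -P drops the group from active to passive concurrent mode, so the LVM metadata stays readable cluster-wide while write access is withdrawn. A sketch of the branch; the non-ECM alternative is assumed from the script's structure rather than exercised in this trace:

if [[ $MODE == 32 ]]
then
    # ECM: move to passive mode instead of varying off
    varyonvg -c -n -P $VG 2>/dev/null
    rc_varyonvg=$?
    : return code from varyonvg -c -n -P $VG is $rc_varyonvg
else
    varyoffvg $VG          # assumption: ordinary VGs are varied off outright
    rc_varyoffvg=$?
fi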
+epprd_rg:cl_update_vg_odm_ts(0.004)[163] instfix -iqk IV74100 +epprd_rg:cl_update_vg_odm_ts(0.005)[163] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.012)[164] instfix -iqk IV74883 +epprd_rg:cl_update_vg_odm_ts(0.013)[164] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.020)[165] instfix -iqk IV74698 +epprd_rg:cl_update_vg_odm_ts(0.021)[165] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.028)[166] instfix -iqk IV74246 +epprd_rg:cl_update_vg_odm_ts(0.028)[166] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.035)[174] emgr -l -L IV74883 +epprd_rg:cl_update_vg_odm_ts(0.036)[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.291)[174] emgr -l -L IV74698 +epprd_rg:cl_update_vg_odm_ts(0.292)[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.543)[174] emgr -l -L IV74246 +epprd_rg:cl_update_vg_odm_ts(0.544)[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.795)[183] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_update_vg_odm_ts(0.795)[184] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_update_vg_odm_ts(0.795)[185] : 99.99.999.999 +epprd_rg:cl_update_vg_odm_ts(0.795)[187] typeset -li V R M F +epprd_rg:cl_update_vg_odm_ts(0.795)[188] typeset -Z2 V +epprd_rg:cl_update_vg_odm_ts(0.795)[189] typeset -Z2 R +epprd_rg:cl_update_vg_odm_ts(0.795)[190] typeset -Z3 M +epprd_rg:cl_update_vg_odm_ts(0.795)[191] typeset -Z3 F +epprd_rg:cl_update_vg_odm_ts(0.795)[192] lvm_lvl6=601008015 +epprd_rg:cl_update_vg_odm_ts(0.795)[192] typeset -li lvm_lvl6 +epprd_rg:cl_update_vg_odm_ts(0.795)[194] lvm_lvl7=701003046 +epprd_rg:cl_update_vg_odm_ts(0.795)[194] typeset -li lvm_lvl7 +epprd_rg:cl_update_vg_odm_ts(0.795)[195] VRMF=0 +epprd_rg:cl_update_vg_odm_ts(0.795)[195] typeset -li VRMF +epprd_rg:cl_update_vg_odm_ts(0.795)[198] : Here try and figure out what level of LVM is installed +epprd_rg:cl_update_vg_odm_ts(0.796)[200] lslpp -lcqOr bos.rte.lvm +epprd_rg:cl_update_vg_odm_ts(0.797)[200] cut -f3 -d: +epprd_rg:cl_update_vg_odm_ts(0.799)[200] read V R M F +epprd_rg:cl_update_vg_odm_ts(0.799)[200] IFS=. +epprd_rg:cl_update_vg_odm_ts(0.799)[201] VRMF=0702005101 +epprd_rg:cl_update_vg_odm_ts(0.799)[203] (( 7 == 6 && 702005101 >= 601008015 )) +epprd_rg:cl_update_vg_odm_ts(0.799)[204] (( 702005101 >= 701003046 )) +epprd_rg:cl_update_vg_odm_ts(0.799)[207] : LVM at a level in which timestamp update is unnecessary +epprd_rg:cl_update_vg_odm_ts(0.799)[209] return 0 +epprd_rg:cl_deactivate_vgs(1.125):datavg[vgs_varyoff:291] (( 0 == 0 )) +epprd_rg:cl_deactivate_vgs(1.126):datavg[vgs_varyoff:294] : successful varyoff, set the fence height to read-only +epprd_rg:cl_deactivate_vgs(1.126):datavg[vgs_varyoff:297] cl_set_vg_fence_height -c datavg ro cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=ro(2)) +epprd_rg:cl_deactivate_vgs(1.129):datavg[vgs_varyoff:298] RC=0 +epprd_rg:cl_deactivate_vgs(1.129):datavg[vgs_varyoff:299] (( 0 != 0 )) +epprd_rg:cl_deactivate_vgs(1.129):datavg[vgs_varyoff:403] : Append status to the status file. 
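cl_update_vg_odm_ts decides whether it has anything to do by packing the installed bos.rte.lvm V.R.M.F into one zero-padded integer, so a single numeric comparison covers the whole four-part level (7.2.5.101 becomes 0702005101 above). A sketch of the idiom as the trace shows it:

typeset -li V R M F VRMF
typeset -Z2 V R      # zero-fill version and release to two digits
typeset -Z3 M F      # zero-fill modification and fix to three digits
lslpp -lcqOr bos.rte.lvm | cut -f3 -d: | IFS=. read V R M F
VRMF=$V$R$M$F        # e.g. 07 02 005 101 -> 0702005101
if (( VRMF >= 701003046 ))    # 7.1.3.46, the floor checked in the trace
then
    : LVM at a level in which timestamp update is unnecessary
fi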
+epprd_rg:cl_deactivate_vgs(1.129):datavg[vgs_varyoff:407] echo datavg 0 +epprd_rg:cl_deactivate_vgs(1.129):datavg[vgs_varyoff:407] 1>> /tmp/_deactivate_vgs.tmp +epprd_rg:cl_deactivate_vgs(1.130):datavg[vgs_varyoff:408] return 0 +epprd_rg:cl_deactivate_vgs(1.130):datavg[731] unset PS4_LOOP +epprd_rg:cl_deactivate_vgs(1.130)[736] : Wait for the background instances of vgs_varyoff +epprd_rg:cl_deactivate_vgs(1.130)[738] wait +epprd_rg:cl_deactivate_vgs(1.130)[741] : Collect any failure indications from backgrounded varyoff processing +epprd_rg:cl_deactivate_vgs(1.130)[743] [[ -f /tmp/_deactivate_vgs.tmp ]] +epprd_rg:cl_deactivate_vgs(1.131)[748] cat /tmp/_deactivate_vgs.tmp +epprd_rg:cl_deactivate_vgs(1.131)[748] read VGNAME VARYOFF_STATUS +epprd_rg:cl_deactivate_vgs(1.132)[750] [[ 0 == 1 ]] +epprd_rg:cl_deactivate_vgs(1.132)[748] read VGNAME VARYOFF_STATUS +epprd_rg:cl_deactivate_vgs(1.133)[765] rm -f /tmp/_deactivate_vgs.tmp +epprd_rg:cl_deactivate_vgs(1.135)[769] : Update Resource Manager - release success for the non-error VGs +epprd_rg:cl_deactivate_vgs(1.135)[771] ALLNOERRVGS=All_nonerror_volume_groups +epprd_rg:cl_deactivate_vgs(1.135)[772] [[ true == false ]] +epprd_rg:cl_deactivate_vgs(1.135)[778] cl_RMupdate resource_down All_nonerror_volume_groups cl_deactivate_vgs 2023-01-28T18:00:16.637900 2023-01-28T18:00:16.642379 +epprd_rg:cl_deactivate_vgs(1.163)[782] [[ FALSE == TRUE ]] +epprd_rg:cl_deactivate_vgs(1.163)[791] exit 0 +epprd_rg:process_resources[process_volume_groups:2606] RC=0 +epprd_rg:process_resources[process_volume_groups:2607] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[process_volume_groups:2620] (( 0 != 0 )) +epprd_rg:process_resources[process_volume_groups:2627] return 0 +epprd_rg:process_resources[process_volume_groups_main:2556] STAT=0 +epprd_rg:process_resources[process_volume_groups_main:2559] return 0 +epprd_rg:process_resources[3572] RC=0 +epprd_rg:process_resources[3573] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3575] [[ 0 != 0 ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:00:16.655828 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=SERVICE_LABELS ACTION=RELEASE IP_LABELS='"epprd"' RESOURCE_GROUPS='"epprd_rg' '"' COMMUNICATION_LINKS='""' +epprd_rg:process_resources[1] JOB_TYPE=SERVICE_LABELS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] IP_LABELS=epprd +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] COMMUNICATION_LINKS='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ SERVICE_LABELS == RELEASE ]] +epprd_rg:process_resources[3360] [[ SERVICE_LABELS == ONLINE ]] +epprd_rg:process_resources[3407] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[3411] release_service_labels 
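The wait above is the fan-in point for vgs_varyoff instances that can run in the background (with a single VG here, everything ran inline): each instance writes "<vg> <rc>" into one temp file and the parent aggregates after waiting. A sketch of that fan-out/fan-in shape, with the backgrounding assumed from the script's comments rather than exercised in this trace:

TMP_VARYOFF_STATUS=/tmp/_deactivate_vgs.tmp
for VG in $LIST_OF_VOLUME_GROUPS_FOR_RG
do
    vgs_varyoff $VG $MODE &    # assumed parallel form; serial in this trace
done
wait                           # for the background instances of vgs_varyoff
STATUS=0
if [[ -f $TMP_VARYOFF_STATUS ]]
then
    cat $TMP_VARYOFF_STATUS | while read VGNAME VARYOFF_STATUS
    do
        [[ $VARYOFF_STATUS == 1 ]] && STATUS=1
    done
    rm -f $TMP_VARYOFF_STATUS
fi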
+epprd_rg:process_resources[release_service_labels:3125] PS4_FUNC=release_service_labels +epprd_rg:process_resources[release_service_labels:3125] typeset PS4_FUNC +epprd_rg:process_resources[release_service_labels:3126] [[ high == high ]] +epprd_rg:process_resources[release_service_labels:3126] set -x +epprd_rg:process_resources[release_service_labels:3127] STAT=0 +epprd_rg:process_resources[release_service_labels:3128] clcallev release_service_addr Jan 28 2023 18:00:16 EVENT START: release_service_addr |2023-01-28T18:00:16|22169|EVENT START: release_service_addr | +epprd_rg:release_service_addr[87] version=1.44 +epprd_rg:release_service_addr[90] STATUS=0 +epprd_rg:release_service_addr[91] PROC_RES=false +epprd_rg:release_service_addr[95] [[ SERVICE_LABELS != 0 ]] +epprd_rg:release_service_addr[95] [[ SERVICE_LABELS != GROUP ]] +epprd_rg:release_service_addr[96] PROC_RES=true +epprd_rg:release_service_addr[97] _IP_LABELS=epprd +epprd_rg:release_service_addr[109] saveNSORDER=UNDEFINED +epprd_rg:release_service_addr[110] NSORDER=local +epprd_rg:release_service_addr[110] export NSORDER +epprd_rg:release_service_addr[117] export GROUPNAME +epprd_rg:release_service_addr[119] [[ true == true ]] +epprd_rg:release_service_addr[120] get_list_head epprd +epprd_rg:release_service_addr[120] read SERVICELABELS +epprd_rg:release_service_addr[121] get_list_tail epprd +epprd_rg:release_service_addr[121] read IP_LABELS +epprd_rg:release_service_addr[127] cl_RMupdate resource_releasing All_service_addrs release_service_addr 2023-01-28T18:00:16.741228 2023-01-28T18:00:16.745688 +epprd_rg:release_service_addr[136] clgetif -a epprd +epprd_rg:release_service_addr[136] LC_ALL=C en0 +epprd_rg:release_service_addr[137] return_code=0 +epprd_rg:release_service_addr[137] typeset -li return_code +epprd_rg:release_service_addr[138] (( 0 )) +epprd_rg:release_service_addr[159] cllsif -J '~' -Sn epprd +epprd_rg:release_service_addr[159] cut -d~ -f7 +epprd_rg:release_service_addr[159] uniq +epprd_rg:release_service_addr[159] textual_addr=61.81.244.156 +epprd_rg:release_service_addr[160] clgetif -a 61.81.244.156 +epprd_rg:release_service_addr[160] LC_ALL=C +epprd_rg:release_service_addr[160] INTERFACE='en0 ' +epprd_rg:release_service_addr[161] [[ -z 'en0 ' ]] +epprd_rg:release_service_addr[182] clgetif -n 61.81.244.156 +epprd_rg:release_service_addr[182] LC_ALL=C +epprd_rg:release_service_addr[182] NETMASK='255.255.255.0 ' +epprd_rg:release_service_addr[183] cllsif -J '~' +epprd_rg:release_service_addr[183] grep -wF 61.81.244.156 +epprd_rg:release_service_addr[184] cut -d~ -f3 +epprd_rg:release_service_addr[184] sort -u +epprd_rg:release_service_addr[183] NETWORK=net_ether_01 +epprd_rg:release_service_addr[189] cllsif -J '~' -Si epprda +epprd_rg:release_service_addr[189] grep '~boot~' +epprd_rg:release_service_addr[190] cut -d~ -f3,7 +epprd_rg:release_service_addr[190] grep ^net_ether_01~ +epprd_rg:release_service_addr[191] cut -d~ -f2 +epprd_rg:release_service_addr[191] tail -1 +epprd_rg:release_service_addr[189] BOOT=61.81.244.134 +epprd_rg:release_service_addr[193] [[ -z 61.81.244.134 ]] +epprd_rg:release_service_addr[214] [[ -n 'en0 ' ]] +epprd_rg:release_service_addr[216] cut -f15 -d~ +epprd_rg:release_service_addr[216] cllsif -J '~' -Sn 61.81.244.156 +epprd_rg:release_service_addr[216] [[ AF_INET == AF_INET6 ]] +epprd_rg:release_service_addr[221] cl_swap_IP_address rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[462] version=1.9.14.8 +epprd_rg:cl_swap_IP_address[464] 
cl_get_path -S +epprd_rg:cl_swap_IP_address[464] OP_SEP='~' +epprd_rg:cl_swap_IP_address[465] LC_ALL=C +epprd_rg:cl_swap_IP_address[465] export LC_ALL +epprd_rg:cl_swap_IP_address[466] RESTORE_ROUTES=/usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[468] cl_echo 33 'Starting execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0' /usr/es/sbin/cluster/events/utils/cl_swap_IP_address 'rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0' Jan 28 2023 18:00:16Starting execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0+epprd_rg:cl_swap_IP_address[470] typeset -i oslevel +epprd_rg:cl_swap_IP_address[471] /usr/bin/sed s/-//g +epprd_rg:cl_swap_IP_address[471] /usr/bin/oslevel -r +epprd_rg:cl_swap_IP_address[471] oslevel=720005 +epprd_rg:cl_swap_IP_address[476] [[ 6 == 6 ]] +epprd_rg:cl_swap_IP_address[477] [[ 6 == 7 ]] +epprd_rg:cl_swap_IP_address[484] no -a +epprd_rg:cl_swap_IP_address[484] grep ipignoreredirects +epprd_rg:cl_swap_IP_address[484] awk '{ print $3 }' +epprd_rg:cl_swap_IP_address[484] PRIOR_IPIGNORE_REDIRECTS_VALUE=0 +epprd_rg:cl_swap_IP_address[485] /usr/sbin/no -o ipignoreredirects=1 Setting ipignoreredirects to 1 +epprd_rg:cl_swap_IP_address[490] PROC_RES=false +epprd_rg:cl_swap_IP_address[491] [[ SERVICE_LABELS != 0 ]] +epprd_rg:cl_swap_IP_address[491] [[ SERVICE_LABELS != GROUP ]] +epprd_rg:cl_swap_IP_address[492] PROC_RES=true +epprd_rg:cl_swap_IP_address[495] set -u +epprd_rg:cl_swap_IP_address[497] RC=0 +epprd_rg:cl_swap_IP_address[504] netstat -in Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll en0 1500 link#2 fa.e6.13.4e.a9.20 183735410 0 60752085 0 0 en0 1500 61.81.244 61.81.244.156 183735410 0 60752085 0 0 en0 1500 61.81.244 61.81.244.134 183735410 0 60752085 0 0 lo0 16896 link#1 34267429 0 34267429 0 0 lo0 16896 127 127.0.0.1 34267429 0 34267429 0 0 lo0 16896 ::1%1 34267429 0 34267429 0 0 +epprd_rg:cl_swap_IP_address[505] netstat -rnC Routing tables Destination Gateway Flags Wt Policy If Cost Config_Cost Route tree for Protocol Family 2 (Internet): default 61.81.244.1 UG 1 - en0 0 0 61.81.244.0 61.81.244.156 UHSb 1 - en0 0 0 => 61.81.244/24 61.81.244.156 U 1 - en0 0 0 61.81.244.134 127.0.0.1 UGHS 1 - lo0 0 0 61.81.244.156 127.0.0.1 UGHS 1 - lo0 0 0 61.81.244.255 61.81.244.156 UHSb 1 - en0 0 0 127/8 127.0.0.1 U 1 - lo0 0 0 Route tree for Protocol Family 24 (Internet v6): ::1%1 ::1%1 UH 1 - lo0 0 0 +epprd_rg:cl_swap_IP_address[506] CASC_OR_ROT=rotating +epprd_rg:cl_swap_IP_address[507] ACQ_OR_RLSE=release +epprd_rg:cl_swap_IP_address[508] IF=en0 +epprd_rg:cl_swap_IP_address[509] ADDR=61.81.244.134 +epprd_rg:cl_swap_IP_address[510] OLD_ADDR=61.81.244.156 +epprd_rg:cl_swap_IP_address[511] NETMASK=255.255.255.0 +epprd_rg:cl_swap_IP_address[514] [[ rotating == cascading ]] +epprd_rg:cl_swap_IP_address[525] cut -f3 -d~ +epprd_rg:cl_swap_IP_address[525] cllsif -J '~' -Sw -n 61.81.244.134 +epprd_rg:cl_swap_IP_address[525] NET=net_ether_01 +epprd_rg:cl_swap_IP_address[528] clodmget -qidentifier=61.81.244.134 -f max_aliases -n HACMPadapter +epprd_rg:cl_swap_IP_address[528] ALIAS_FIRST=0 +epprd_rg:cl_swap_IP_address[529] grep -c -w inet +epprd_rg:cl_swap_IP_address[529] ifconfig en0 +epprd_rg:cl_swap_IP_address[529] LC_ALL=C +epprd_rg:cl_swap_IP_address[529] NUM_ADDRS=2 +epprd_rg:cl_swap_IP_address[530] [[ release == acquire ]] +epprd_rg:cl_swap_IP_address[598] cl_echo 
7320 'cl_swap_IP_address: Removing aliased IP address 61.81.244.156 from adapter en0' cl_swap_IP_address 61.81.244.156 en0 Jan 28 2023 18:00:16cl_swap_IP_address: Removing aliased IP address 61.81.244.156 from adapter en0+epprd_rg:cl_swap_IP_address[600] amlog_trace '' 'Deliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_swap_IP_address[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_swap_IP_address[amlog_trace:319] cltime +epprd_rg:cl_swap_IP_address[amlog_trace:319] DATE=2023-01-28T18:00:16.987832 +epprd_rg:cl_swap_IP_address[amlog_trace:320] echo '|2023-01-28T18:00:16.987832|INFO: Deliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_swap_IP_address[601] PERSISTENT='' +epprd_rg:cl_swap_IP_address[602] ADDR1=61.81.244.156 +epprd_rg:cl_swap_IP_address[603] disable_pmtu_gated Setting tcp_pmtu_discover to 0 Setting udp_pmtu_discover to 0 +epprd_rg:cl_swap_IP_address[604] alias_replace_routes /usr/es/sbin/cluster/.restore_routes en0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:168] RR=/usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[alias_replace_routes:169] shift +epprd_rg:cl_swap_IP_address[alias_replace_routes:170] interfaces=en0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:171] RC=0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:173] cp /dev/null /usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[alias_replace_routes:175] cat +epprd_rg:cl_swap_IP_address[alias_replace_routes:175] 1> /usr/es/sbin/cluster/.restore_routes 0<< \EOF +epprd_rg:cl_swap_IP_address[alias_replace_routes:175] date #!/bin/ksh # # Script created by cl_swap_IP_address on Sat Jan 28 18:00:17 KORST 2023 # PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin PS4='${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] ' export VERBOSE_LOGGING=${VERBOSE_LOGGING:-"high"} [[ "$VERBOSE_LOGGING" = "high" ]] && set -x : Starting $0 at $(date) # EOF +epprd_rg:cl_swap_IP_address[alias_replace_routes:189] awk '$3 !~ "[Ll]ink" && $3 !~ ":" && $3 !~ "Network" {print $4}' +epprd_rg:cl_swap_IP_address[alias_replace_routes:189] netstat -in +epprd_rg:cl_swap_IP_address[alias_replace_routes:189] LOCADDRS=$'61.81.244.156\n61.81.244.134\n127.0.0.1' +epprd_rg:cl_swap_IP_address[alias_replace_routes:191] netstat -rnC Routing tables Destination Gateway Flags Wt Policy If Cost Config_Cost Route tree for Protocol Family 2 (Internet): default 61.81.244.1 UG 1 - en0 0 0 61.81.244.0 61.81.244.156 UHSb 1 - en0 0 0 => 61.81.244/24 61.81.244.156 U 1 - en0 0 0 61.81.244.134 127.0.0.1 UGHS 1 - lo0 0 0 61.81.244.156 127.0.0.1 UGHS 1 - lo0 0 0 61.81.244.255 61.81.244.156 UHSb 1 - en0 0 0 127/8 127.0.0.1 U 1 - lo0 0 0 Route tree for Protocol Family 24 (Internet v6): ::1%1 ::1%1 UH 1 - lo0 0 0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:200] I=1 +epprd_rg:cl_swap_IP_address[alias_replace_routes:200] typeset 
-li I +epprd_rg:cl_swap_IP_address[alias_replace_routes:201] NXTSVC='' +epprd_rg:cl_swap_IP_address[alias_replace_routes:203] awk '$3 !~ "[Ll]ink" && $3 !~ ":" && ($1 == "en0" || $1 == "en0*") {print $4}' +epprd_rg:cl_swap_IP_address[alias_replace_routes:203] netstat -in +epprd_rg:cl_swap_IP_address[alias_replace_routes:203] IFADDRS=$'61.81.244.156\n61.81.244.134' +epprd_rg:cl_swap_IP_address[alias_replace_routes:204] cllsif -J '~' -Spi epprda +epprd_rg:cl_swap_IP_address[alias_replace_routes:204] grep '~net_ether_01~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:205] grep -E '~service~|~persistent~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:205] cut -d~ -f7 +epprd_rg:cl_swap_IP_address[alias_replace_routes:205] sort -u +epprd_rg:cl_swap_IP_address[alias_replace_routes:204] SVCADDRS=61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:210] awk '$1 !~ ":" {print $1}' +epprd_rg:cl_swap_IP_address[alias_replace_routes:210] echo 61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:210] SVCADDRS=61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:212] cllsif -J '~' -Spi epprda +epprd_rg:cl_swap_IP_address[alias_replace_routes:212] grep '~net_ether_01~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:213] grep -E '~persistent~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:214] cut -d~ -f7 +epprd_rg:cl_swap_IP_address[alias_replace_routes:212] PERSISTENT_IP='' +epprd_rg:cl_swap_IP_address[alias_replace_routes:215] routeaddr='' +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:223] [[ -z '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:225] routeaddr=61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:227] [[ 61.81.244.156 != 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:234] [[ -n '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.134 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:234] [[ -n '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:243] NXTADDR='' +epprd_rg:cl_swap_IP_address[alias_replace_routes:244] bootaddr='' +epprd_rg:cl_swap_IP_address[alias_replace_routes:245] [[ -z '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:247] cllsif -J '~' -Spi epprda +epprd_rg:cl_swap_IP_address[alias_replace_routes:247] grep '~net_ether_01~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:247] 
grep '~boot~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:247] awk -F~ '$9 == "en0" { print $7; }' +epprd_rg:cl_swap_IP_address[alias_replace_routes:247] bootaddr=61.81.244.134 +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] [[ 61.81.244.156 == 61.81.244.134 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] [[ 61.81.244.134 == 61.81.244.134 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] clgetnet 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] clgetnet 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] clgetnet 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] [[ 61.81.244.134 != 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:252] NXTADDR=61.81.244.134 +epprd_rg:cl_swap_IP_address[alias_replace_routes:253] break +epprd_rg:cl_swap_IP_address[alias_replace_routes:258] swaproute=0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:259] NETSTAT_FLAGS='-nrf inet' +epprd_rg:cl_swap_IP_address[alias_replace_routes:261] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:264] swaproute=1 +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] netstat -nrf inet +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] fgrep -w en0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] read DEST GW FLAGS OTHER +epprd_rg:cl_swap_IP_address[alias_replace_routes:268] LOOPBACK=127.0.0.1 +epprd_rg:cl_swap_IP_address[alias_replace_routes:336] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:336] clgetnet 61.81.244.1 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:336] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:338] [[ 0 == 0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:341] [[ -z release ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:341] [[ 61.81.244.156 == ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:346] print 'cl_route_change default 127.0.0.1 61.81.244.1 inet' +epprd_rg:cl_swap_IP_address[alias_replace_routes:346] 1>> /usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[alias_replace_routes:347] add_rc_check /usr/es/sbin/cluster/.restore_routes cl_route_change +epprd_rg:cl_swap_IP_address[add_rc_check:70] RR=/usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[add_rc_check:71] FUNC=cl_route_change +epprd_rg:cl_swap_IP_address[add_rc_check:73] cat +epprd_rg:cl_swap_IP_address[add_rc_check:73] 1>> /usr/es/sbin/cluster/.restore_routes 0<< \EOF rc=$? 
if [[ $rc != 0 ]] then echo "ERROR: cl_route_change failed with code $rc" cl_route_change_RC=$rc fi EOF +epprd_rg:cl_swap_IP_address[alias_replace_routes:350] cl_route_change default 61.81.244.1 127.0.0.1 inet +epprd_rg:cl_swap_IP_address[alias_replace_routes:351] RC=0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:352] : cl_route_change completed with 0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:353] I=I+1 +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] read DEST GW FLAGS OTHER +epprd_rg:cl_swap_IP_address[alias_replace_routes:268] LOOPBACK=127.0.0.2 +epprd_rg:cl_swap_IP_address[alias_replace_routes:290] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:292] [[ '' != '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:292] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:294] route delete -host 61.81.244.0 61.81.244.156 61.81.244.156 host 61.81.244.0: gateway 61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] read DEST GW FLAGS OTHER +epprd_rg:cl_swap_IP_address[alias_replace_routes:268] LOOPBACK=127.0.0.2 +epprd_rg:cl_swap_IP_address[alias_replace_routes:272] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:272] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:272] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:274] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:276] [[ '' != '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:276] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:279] route delete -net 61.81.244/24 61.81.244.156 61.81.244.156 net 61.81.244: gateway 61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] read DEST GW FLAGS OTHER +epprd_rg:cl_swap_IP_address[alias_replace_routes:268] LOOPBACK=127.0.0.2 +epprd_rg:cl_swap_IP_address[alias_replace_routes:290] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:292] [[ '' != '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:292] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:294] route delete -host 61.81.244.255 61.81.244.156 61.81.244.156 host 61.81.244.255: gateway 61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] read DEST GW FLAGS OTHER +epprd_rg:cl_swap_IP_address[alias_replace_routes:360] echo 'exit $cl_route_change_RC' +epprd_rg:cl_swap_IP_address[alias_replace_routes:360] 1>> /usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[alias_replace_routes:361] chmod +x /usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[alias_replace_routes:362] return 0 +epprd_rg:cl_swap_IP_address[605] RC=0 +epprd_rg:cl_swap_IP_address[606] : alias_replace_routes completed with 0 +epprd_rg:cl_swap_IP_address[609] clifconfig en0 delete 61.81.244.156 +epprd_rg:clifconfig[117] version=1.9 +epprd_rg:clifconfig[121] set -A args en0 delete 61.81.244.156 +epprd_rg:clifconfig[124] interface=en0 +epprd_rg:clifconfig[125] shift +epprd_rg:clifconfig[127] [[ -n delete ]] +epprd_rg:clifconfig[130] delete_val=1 +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n 61.81.244.156 ]] +epprd_rg:clifconfig[147] params=' address=61.81.244.156' +epprd_rg:clifconfig[147] addr=61.81.244.156 +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n '' ]] +epprd_rg:clifconfig[174] [[ -n 1 ]] 
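Before touching the alias, alias_replace_routes builds a one-shot recovery script, /usr/es/sbin/cluster/.restore_routes: each cl_route_change it records gets the heredoc return-code check shown above appended after it, and the script exits with the last failure code. A trimmed sketch of the builder (the ksh header the real script also writes is omitted):

RR=/usr/es/sbin/cluster/.restore_routes
cp /dev/null $RR
print 'cl_route_change default 127.0.0.1 61.81.244.1 inet' >> $RR
cat >> $RR <<\EOF
rc=$?
if [[ $rc != 0 ]]
then
    echo "ERROR: cl_route_change failed with code $rc"
    cl_route_change_RC=$rc
fi
EOF
echo 'exit $cl_route_change_RC' >> $RR
chmod +x $RR
# the routes through the leaving alias are then deleted outright:
route delete -net 61.81.244/24 61.81.244.156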
+epprd_rg:clifconfig[174] [[ -n epprd_rg ]] +epprd_rg:clifconfig[175] clwparname epprd_rg +epprd_rg:clwparname[38] version=1.3.1.1 +epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clwparname[44] [[ -z '' ]] +epprd_rg:clwparname[44] exit 0 +epprd_rg:clifconfig[175] WPARNAME='' +epprd_rg:clifconfig[176] (( 0 == 0 )) +epprd_rg:clifconfig[176] [[ -n '' ]] +epprd_rg:clifconfig[218] belongs_to_an_active_wpar 61.81.244.156 +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]] +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1 +epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast +epprd_rg:clifconfig[218] IFS='~' +epprd_rg:clifconfig[219] rc=1 +epprd_rg:clifconfig[221] [[ 1 == 0 ]] +epprd_rg:clifconfig[275] ifconfig en0 delete 61.81.244.156 +epprd_rg:cl_swap_IP_address[611] [[ 1 == 1 ]] +epprd_rg:cl_swap_IP_address[613] [[ -n '' ]] +epprd_rg:cl_swap_IP_address[662] [[ -n 61.81.244.134 ]] +epprd_rg:cl_swap_IP_address[671] (( 720005 <= 710003 )) +epprd_rg:cl_swap_IP_address[675] clifconfig en0 alias 61.81.244.134 netmask 255.255.255.0 +epprd_rg:clifconfig[117] version=1.9 +epprd_rg:clifconfig[121] set -A args en0 alias 61.81.244.134 netmask 255.255.255.0 +epprd_rg:clifconfig[124] interface=en0 +epprd_rg:clifconfig[125] shift +epprd_rg:clifconfig[127] [[ -n alias ]] +epprd_rg:clifconfig[129] alias_val=1 +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n 61.81.244.134 ]] +epprd_rg:clifconfig[147] params=' address=61.81.244.134' +epprd_rg:clifconfig[147] addr=61.81.244.134 +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n netmask ]] +epprd_rg:clifconfig[149] params=' address=61.81.244.134 netmask=255.255.255.0' +epprd_rg:clifconfig[149] shift +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n '' ]] +epprd_rg:clifconfig[174] [[ -n 1 ]] +epprd_rg:clifconfig[174] [[ -n epprd_rg ]] +epprd_rg:clifconfig[175] clwparname epprd_rg +epprd_rg:clwparname[38] version=1.3.1.1 +epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clwparname[44] [[ -z '' ]] +epprd_rg:clwparname[44] exit 0 +epprd_rg:clifconfig[175] WPARNAME='' +epprd_rg:clifconfig[176] (( 0 == 0 )) +epprd_rg:clifconfig[176] [[ -n '' ]] +epprd_rg:clifconfig[218] belongs_to_an_active_wpar 61.81.244.134 +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]] +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1 +epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast +epprd_rg:clifconfig[218] IFS='~' +epprd_rg:clifconfig[219] rc=1 +epprd_rg:clifconfig[221] [[ 1 == 0 ]] +epprd_rg:clifconfig[275] ifconfig en0 alias 61.81.244.134 netmask 255.255.255.0 +epprd_rg:cl_swap_IP_address[679] /usr/es/sbin/cluster/.restore_routes +epprd_rg:.restore_routes[+9] date +epprd_rg:.restore_routes[+9] : Starting /usr/es/sbin/cluster/.restore_routes at Sat Jan 28 18:00:17 KORST 2023 +epprd_rg:.restore_routes[+11] cl_route_change default 127.0.0.1 61.81.244.1 inet +epprd_rg:.restore_routes[+12] rc=0 +epprd_rg:.restore_routes[+13] [[ 0 != 0 ]] +epprd_rg:.restore_routes[+19] exit +epprd_rg:cl_swap_IP_address[680] [[ 0 == 0 ]] +epprd_rg:cl_swap_IP_address[680] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[681] : Completed /usr/es/sbin/cluster/.restore_routes with return code 0 
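The swap itself is three moves, all visible above: delete the service alias, re-add the boot alias, then run the saved .restore_routes to put the default route back (clifconfig is the WPAR-aware wrapper and, with no WPAR configured, falls through to plain ifconfig). Condensed:

IF=en0
OLD_ADDR=61.81.244.156                 # service address being released
ADDR=61.81.244.134                     # boot address going back on
NETMASK=255.255.255.0
clifconfig $IF delete $OLD_ADDR
clifconfig $IF alias $ADDR netmask $NETMASK
/usr/es/sbin/cluster/.restore_routes   # restores the default route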
+epprd_rg:cl_swap_IP_address[682] enable_pmtu_gated Setting tcp_pmtu_discover to 1 Setting udp_pmtu_discover to 1 +epprd_rg:cl_swap_IP_address[685] hats_adapter_notify en0 -d 61.81.244.156 alias 2023-01-28T18:00:17.226380 hats_adapter_notify 2023-01-28T18:00:17.227598 hats_adapter_notify +epprd_rg:cl_swap_IP_address[688] check_alias_status en0 61.81.244.156 release +epprd_rg:cl_swap_IP_address[check_alias_status:108] CH_INTERFACE=en0 +epprd_rg:cl_swap_IP_address[check_alias_status:109] CH_ADDRESS=61.81.244.156 +epprd_rg:cl_swap_IP_address[check_alias_status:110] CH_ACQ_OR_RLSE=release +epprd_rg:cl_swap_IP_address[check_alias_status:118] IF_IB=en0 +epprd_rg:cl_swap_IP_address[check_alias_status:120] awk '{print index($0, "ib")}' +epprd_rg:cl_swap_IP_address[check_alias_status:120] echo en0 +epprd_rg:cl_swap_IP_address[check_alias_status:120] IS_IB=0 +epprd_rg:cl_swap_IP_address[check_alias_status:122] [[ 0 != 1 ]] +epprd_rg:cl_swap_IP_address[check_alias_status:124] clifconfig en0 +epprd_rg:cl_swap_IP_address[check_alias_status:124] fgrep -w 61.81.244.156 +epprd_rg:cl_swap_IP_address[check_alias_status:124] awk '{print $2}' +epprd_rg:clifconfig[117] version=1.9 +epprd_rg:clifconfig[121] set -A args en0 +epprd_rg:clifconfig[124] interface=en0 +epprd_rg:clifconfig[125] shift +epprd_rg:clifconfig[127] [[ -n '' ]] +epprd_rg:clifconfig[174] [[ -n '' ]] +epprd_rg:clifconfig[218] belongs_to_an_active_wpar +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]] +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1 +epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast +epprd_rg:clifconfig[218] IFS='~' +epprd_rg:clifconfig[219] rc=1 +epprd_rg:clifconfig[221] [[ 1 == 0 ]] +epprd_rg:clifconfig[275] ifconfig en0 +epprd_rg:cl_swap_IP_address[check_alias_status:124] ADDR='' +epprd_rg:cl_swap_IP_address[check_alias_status:129] [ release = acquire ] +epprd_rg:cl_swap_IP_address[check_alias_status:139] [[ '' == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[check_alias_status:144] return 0 +epprd_rg:cl_swap_IP_address[689] RC1=0 +epprd_rg:cl_swap_IP_address[690] [[ 0 == 0 ]] +epprd_rg:cl_swap_IP_address[690] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[693] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[697] amlog_trace '' 'Deliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_swap_IP_address[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_swap_IP_address[amlog_trace:319] cltime +epprd_rg:cl_swap_IP_address[amlog_trace:319] DATE=2023-01-28T18:00:17.281918 +epprd_rg:cl_swap_IP_address[amlog_trace:320] echo '|2023-01-28T18:00:17.281918|INFO: Deliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_swap_IP_address[701] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[714] flush_arp +epprd_rg:cl_swap_IP_address[flush_arp:49] arp -an +epprd_rg:cl_swap_IP_address[flush_arp:49] grep '\?' 
+epprd_rg:cl_swap_IP_address[flush_arp:49] tr -d '()' +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.27 61.81.244.27 (61.81.244.27) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.217 61.81.244.217 (61.81.244.217) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.220 61.81.244.220 (61.81.244.220) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.221 61.81.244.221 (61.81.244.221) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.224 61.81.244.224 (61.81.244.224) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.239 61.81.244.239 (61.81.244.239) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.251 61.81.244.251 (61.81.244.251) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.252 61.81.244.252 (61.81.244.252) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.123 61.81.244.123 (61.81.244.123) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.126 61.81.244.126 (61.81.244.126) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.140 61.81.244.140 (61.81.244.140) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.145 61.81.244.145 (61.81.244.145) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.146 61.81.244.146 (61.81.244.146) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.1 61.81.244.1 (61.81.244.1) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.154 61.81.244.154 (61.81.244.154) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:52] return 0 +epprd_rg:cl_swap_IP_address[716] netstat -in
Name  Mtu   Network     Address            Ipkts     Ierrs Opkts    Oerrs Coll
en0   1500  link#2      fa.e6.13.4e.a9.20  183735413 0     60752089 0     0
en0   1500  61.81.244   61.81.244.134      183735413 0     60752089 0     0
lo0   16896 link#1                         34267434  0     34267434 0     0
lo0   16896 127         127.0.0.1          34267434  0     34267434 0     0
lo0   16896 ::1%1                          34267434  0     34267434 0     0
+epprd_rg:cl_swap_IP_address[717] netstat -rnC
Routing tables
Destination      Gateway          Flags  Wt  Policy  If   Cost  Config_Cost

Route tree for Protocol Family 2 (Internet):
default          61.81.244.1      UG     1   -       en0  0     0
61.81.244.0      61.81.244.134    UHSb   1   -       en0  0     0  =>
61.81.244/24     61.81.244.134    U      1   -       en0  0     0
61.81.244.134    127.0.0.1        UGHS   1   -       lo0  0     0
61.81.244.255    61.81.244.134    UHSb   1   -       en0  0     0
127/8            127.0.0.1        U      1   -       lo0  0     0

Route tree for Protocol Family 24 (Internet v6):
::1%1            ::1%1            UH     1   -       lo0  0     0
+epprd_rg:cl_swap_IP_address[989] no -o ipignoreredirects=0
Setting ipignoreredirects to 0
+epprd_rg:cl_swap_IP_address[992] cl_echo 32 'Completed execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0. Exit status = 0' /usr/es/sbin/cluster/events/utils/cl_swap_IP_address 'rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0' 0 Jan 28 2023 18:00:17
Completed execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0. Exit status = 0
+epprd_rg:cl_swap_IP_address[994] date Sat Jan 28 18:00:17 KORST 2023 +epprd_rg:cl_swap_IP_address[996] exit 0 +epprd_rg:release_service_addr[225] RC=0 +epprd_rg:release_service_addr[227] [[ 0 != 0 ]] +epprd_rg:release_service_addr[245] cl_RMupdate resource_down All_nonerror_service_addrs release_service_addr 2023-01-28T18:00:17.374980 2023-01-28T18:00:17.379434 +epprd_rg:release_service_addr[249] [[ UNDEFINED != UNDEFINED ]] +epprd_rg:release_service_addr[252] NSORDER='' +epprd_rg:release_service_addr[252] export NSORDER +epprd_rg:release_service_addr[255] exit 0 Jan 28 2023 18:00:17 EVENT COMPLETED: release_service_addr 0 |2023-01-28T18:00:17|22169|EVENT COMPLETED: release_service_addr 0| +epprd_rg:process_resources[release_service_labels:3129] RC=0 +epprd_rg:process_resources[release_service_labels:3131] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[release_service_labels:3146] (( 0 != 0 )) +epprd_rg:process_resources[release_service_labels:3152] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. +epprd_rg:process_resources[release_service_labels:3154] return 0 +epprd_rg:process_resources[3412] RC=0 +epprd_rg:process_resources[3413] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:00:19.622207 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=WPAR ACTION=RELEASE RESOURCE_GROUPS='"epprd_rg' '"' +epprd_rg:process_resources[1] JOB_TYPE=WPAR +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ WPAR == RELEASE ]] +epprd_rg:process_resources[3360] [[ WPAR == ONLINE ]] +epprd_rg:process_resources[3492] process_wpars RELEASE +epprd_rg:process_resources[process_wpars:3265] PS4_FUNC=process_wpars +epprd_rg:process_resources[process_wpars:3265] typeset PS4_FUNC +epprd_rg:process_resources[process_wpars:3266] [[ high == high ]] +epprd_rg:process_resources[process_wpars:3266] set -x +epprd_rg:process_resources[process_wpars:3267] STAT=0 +epprd_rg:process_resources[process_wpars:3268] action=RELEASE +epprd_rg:process_resources[process_wpars:3268] typeset action +epprd_rg:process_resources[process_wpars:3272] export
GROUPNAME +epprd_rg:process_resources[process_wpars:3280] clstop_wpar +epprd_rg:clstop_wpar[42] version=1.7 +epprd_rg:clstop_wpar[46] [[ rg_move == reconfig_resource_release ]] +epprd_rg:clstop_wpar[46] [[ RELEASE_PRIMARY == reconfig_resource_release ]] +epprd_rg:clstop_wpar[55] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clstop_wpar[55] [[ -z '' ]] +epprd_rg:clstop_wpar[55] exit 0 +epprd_rg:process_resources[process_wpars:3281] RC=0 +epprd_rg:process_resources[process_wpars:3285] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[process_wpars:3294] return 0 +epprd_rg:process_resources[3493] RC=0 +epprd_rg:process_resources[3495] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3497] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:00:19.660112 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=OFFLINE RESOURCE_GROUPS='"epprd_rg"' +epprd_rg:process_resources[1] JOB_TYPE=OFFLINE +epprd_rg:process_resources[1] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ OFFLINE == RELEASE ]] +epprd_rg:process_resources[3360] [[ OFFLINE == ONLINE ]] +epprd_rg:process_resources[3681] set_resource_group_state DOWN +epprd_rg:process_resources[set_resource_group_state:82] PS4_FUNC=set_resource_group_state +epprd_rg:process_resources[set_resource_group_state:82] typeset PS4_FUNC +epprd_rg:process_resources[set_resource_group_state:83] [[ high == high ]] +epprd_rg:process_resources[set_resource_group_state:83] set -x +epprd_rg:process_resources[set_resource_group_state:84] STAT=0 +epprd_rg:process_resources[set_resource_group_state:85] new_status=DOWN +epprd_rg:process_resources[set_resource_group_state:89] export GROUPNAME +epprd_rg:process_resources[set_resource_group_state:90] [[ DOWN != DOWN ]] +epprd_rg:process_resources[set_resource_group_state:100] : Resource Manager Updates +epprd_rg:process_resources[set_resource_group_state:122] cl_RMupdate rg_down epprd_rg process_resources 2023-01-28T18:00:19.695964 2023-01-28T18:00:19.700081 +epprd_rg:process_resources[set_resource_group_state:124] amlog_trace '' 'acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:318] clcycle clavailability.log +epprd_rg:process_resources[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:process_resources[amlog_trace:319] cltime +epprd_rg:process_resources[amlog_trace:319] DATE=2023-01-28T18:00:19.731764 +epprd_rg:process_resources[amlog_trace:320] echo '|2023-01-28T18:00:19.731764|INFO: acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:process_resources[set_resource_group_state:153] return 0 +epprd_rg:process_resources[3682] RC=0 +epprd_rg:process_resources[3683] postvg_for_rdisk 
+epprd_rg:process_resources[postvg_for_rdisk:856] PS4_FUNC=postvg_for_rdisk +epprd_rg:process_resources[postvg_for_rdisk:856] typeset PS4_FUNC +epprd_rg:process_resources[postvg_for_rdisk:857] [[ high == high ]] +epprd_rg:process_resources[postvg_for_rdisk:857] set -x +epprd_rg:process_resources[postvg_for_rdisk:858] STAT=0 +epprd_rg:process_resources[postvg_for_rdisk:859] FAILURE_IN_METHOD=0 +epprd_rg:process_resources[postvg_for_rdisk:859] typeset -li FAILURE_IN_METHOD +epprd_rg:process_resources[postvg_for_rdisk:860] LIST_OF_FAILED_RGS='' +epprd_rg:process_resources[postvg_for_rdisk:861] RG_LIST=epprd_rg +epprd_rg:process_resources[postvg_for_rdisk:862] RDISK_LIST='' +epprd_rg:process_resources[postvg_for_rdisk:863] DISK_LIST='' +epprd_rg:process_resources[postvg_for_rdisk:866] : Resource groups are processed individually. This is required because +epprd_rg:process_resources[postvg_for_rdisk:867] : the replication mechanism may differ between resource groups. +epprd_rg:process_resources[postvg_for_rdisk:871] getReplicatedResources epprd_rg +epprd_rg:process_resources[getReplicatedResources:699] PS4_FUNC=getReplicatedResources +epprd_rg:process_resources[getReplicatedResources:699] typeset PS4_FUNC +epprd_rg:process_resources[getReplicatedResources:700] [[ high == high ]] +epprd_rg:process_resources[getReplicatedResources:700] set -x +epprd_rg:process_resources[getReplicatedResources:702] RV=false +epprd_rg:process_resources[getReplicatedResources:704] clodmget -n -f type HACMPrresmethods +epprd_rg:process_resources[getReplicatedResources:704] [[ -n 9 ]] +epprd_rg:process_resources[getReplicatedResources:707] : Replicated resource methods are defined, check for resources +epprd_rg:process_resources[getReplicatedResources:709] clodmget -q $'name like \'*_REP_RESOURCE\' AND group=epprd_rg' -f value -n HACMPresource +epprd_rg:process_resources[getReplicatedResources:709] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:718] : Verify if any backup profiles are configured and trigger cbm utilities based on that +epprd_rg:process_resources[getReplicatedResources:720] clodmget -q name=BACKUP_ENABLED -f value HACMPresource +epprd_rg:process_resources[getReplicatedResources:720] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:739] echo false +epprd_rg:process_resources[postvg_for_rdisk:871] REPLICATED_RESOURCES=false +epprd_rg:process_resources[postvg_for_rdisk:873] [[ false == true ]] +epprd_rg:process_resources[postvg_for_rdisk:946] return 0 +epprd_rg:process_resources[3684] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:00:19.756044 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=NONE +epprd_rg:process_resources[1] JOB_TYPE=NONE +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 
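
Each pass through process_resources[3324-3360] is one turn of the resource-group-processing-agent loop: clRGPA is asked what to do next, its KEY=VALUE answer is eval'ed into the (auto-exported) environment, and JOB_TYPE selects a handler; the JOB_TYPE=NONE round just below is what finally breaks out. Schematically, with only the handlers that appear in this event (a reconstruction, not the shipped code):

    while true
    do
        set -a                 # auto-export whatever clRGPA hands back
        eval $(clRGPA)         # e.g. JOB_TYPE=WPAR ACTION=RELEASE RESOURCE_GROUPS="epprd_rg"
        set +a

        case $JOB_TYPE in
            WPAR)    process_wpars $ACTION ;;       # clstop_wpar on RELEASE
            OFFLINE) set_resource_group_state DOWN  # cl_RMupdate rg_down, amlog_trace
                     postvg_for_rdisk ;;            # replicated-disk postprocessing
            NONE)    break ;;                       # nothing left for this event
        esac
    done
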
+epprd_rg:process_resources[3360] [[ NONE == RELEASE ]] +epprd_rg:process_resources[3360] [[ NONE == ONLINE ]] +epprd_rg:process_resources[3729] break +epprd_rg:process_resources[3740] : If sddsrv was turned off above, turn it back on again +epprd_rg:process_resources[3742] [[ FALSE == TRUE ]] +epprd_rg:process_resources[3747] exit 0 :rg_move[247] : unsetting AM_SYNC_CALLED_BY from $'callers environment as\n: we dont' require it after this point in execution. :rg_move[250] unset AM_SYNC_CALLED_BY :rg_move[253] [[ -f /tmp/.NFSSTOPPED ]] :rg_move[274] [[ -f /tmp/.RPCLOCKDSTOPPED ]] :rg_move[293] exit 0 Jan 28 2023 18:00:19 EVENT COMPLETED: rg_move epprda 1 RELEASE 0 |2023-01-28T18:00:19|22169|EVENT COMPLETED: rg_move epprda 1 RELEASE 0| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T18:00:19.850501 :clevlog[amlog_trace:320] echo '|2023-01-28T18:00:19.850501|INFO: rg_move|epprd_rg|epprda|1|RELEASE|0' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :rg_move_release[+68] exit 0 Jan 28 2023 18:00:19 EVENT COMPLETED: rg_move_release epprda 1 0 |2023-01-28T18:00:19|22169|EVENT COMPLETED: rg_move_release epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:00:19.975887 + echo '|2023-01-28T18:00:19.975887|INFO: rg_move_release|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 18:00:22 EVENT START: rg_move_fence epprda 1 |2023-01-28T18:00:22|22169|EVENT START: rg_move_fence epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:00:22.172081 + echo '|2023-01-28T18:00:22.172081|INFO: rg_move_fence|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_fence[62] [[ high == high ]] :rg_move_fence[62] version=1.11 :rg_move_fence[63] NODENAME=epprda :rg_move_fence[63] export NODENAME :rg_move_fence[65] set -u :rg_move_fence[67] [ 2 != 2 ] :rg_move_fence[73] set +u :rg_move_fence[75] [[ -z TRUE ]] :rg_move_fence[80] [[ TRUE == TRUE ]] :rg_move_fence[82] LOCAL_NODENAME=epprda :rg_move_fence[83] odmget -qid=1 HACMPgroup :rg_move_fence[83] egrep 'group =' :rg_move_fence[83] awk '{print $3}' :rg_move_fence[83] eval RGNAME='"epprd_rg"' :rg_move_fence[1] RGNAME=epprd_rg +epprd_rg:rg_move_fence[84] GROUPNAME=epprd_rg +epprd_rg:rg_move_fence[85] group_state='$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[86] set +u +epprd_rg:rg_move_fence[87] eval print '$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[1] print ONLINE +epprd_rg:rg_move_fence[87] RG_MOVE_ONLINE=ONLINE +epprd_rg:rg_move_fence[87] export RG_MOVE_ONLINE +epprd_rg:rg_move_fence[88] set -u +epprd_rg:rg_move_fence[89] RG_MOVE_ONLINE=ONLINE +epprd_rg:rg_move_fence[91] set -a +epprd_rg:rg_move_fence[92] clsetenvgrp epprda rg_move epprd_rg '' :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_fence[92] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" 
SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[93] RC=0 +epprd_rg:rg_move_fence[94] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_fence[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_fence[3] HOMELESS_GROUPS='' +epprd_rg:rg_move_fence[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_fence[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_fence[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_fence[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_fence[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_fence[8] SIBLING_GROUPS='' +epprd_rg:rg_move_fence[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_fence[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_fence[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[95] set +a +epprd_rg:rg_move_fence[96] [ 0 -ne 0 ] +epprd_rg:rg_move_fence[103] process_resources FENCE :rg_move_fence[3318] version=1.169 :rg_move_fence[3321] STATUS=0 :rg_move_fence[3322] sddsrv_off=FALSE :rg_move_fence[3324] true :rg_move_fence[3326] : call rgpa, and it will tell us what to do next :rg_move_fence[3328] set -a :rg_move_fence[3329] clRGPA FENCE :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa FENCE 2023-01-28T18:00:22.277569 clrgpa :clRGPA[+55] exit 0 :rg_move_fence[3329] eval JOB_TYPE=NONE :rg_move_fence[1] JOB_TYPE=NONE :rg_move_fence[3330] RC=0 :rg_move_fence[3331] set +a :rg_move_fence[3333] (( 0 != 0 )) :rg_move_fence[3342] RESOURCE_GROUPS='' :rg_move_fence[3343] GROUPNAME='' :rg_move_fence[3343] export GROUPNAME :rg_move_fence[3353] IS_SERVICE_START=1 :rg_move_fence[3354] IS_SERVICE_STOP=1 :rg_move_fence[3360] [[ NONE == RELEASE ]] :rg_move_fence[3360] [[ NONE == ONLINE ]] :rg_move_fence[3729] break :rg_move_fence[3740] : If sddsrv was turned off above, turn it back on again :rg_move_fence[3742] [[ FALSE == TRUE ]] :rg_move_fence[3747] exit 0 +epprd_rg:rg_move_fence[104] : exit status of process_resources FENCE is: 0 +epprd_rg:rg_move_fence[107] [[ TRUE == TRUE ]] +epprd_rg:rg_move_fence[109] export EVENT_TYPE +epprd_rg:rg_move_fence[110] echo RELEASE_PRIMARY RELEASE_PRIMARY +epprd_rg:rg_move_fence[111] [[ -n '' ]] +epprd_rg:rg_move_fence[141] exit 0 Jan 28 2023 18:00:22 EVENT COMPLETED: rg_move_fence epprda 1 0 |2023-01-28T18:00:22|22169|EVENT COMPLETED: rg_move_fence epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:00:22.370858 + echo '|2023-01-28T18:00:22.370858|INFO: rg_move_fence|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log PowerHA SystemMirror Event Summary ---------------------------------------------------------------------------- Serial number for this event: 22169 Event: TE_RG_MOVE_RELEASE Start time: Sat Jan 28 18:00:05 2023 End time: Sat Jan 28 18:00:22 2023 Action: Resource: Script Name: 
----------------------------------------------------------------------------
Releasing resource group: epprd_rg                   process_resources
      Search on: Sat.Jan.28.18:00:06.KORST.2023.process_resources.epprd_rg.ref
Releasing resource: All_servers                      stop_server
      Search on: Sat.Jan.28.18:00:06.KORST.2023.stop_server.All_servers.epprd_rg.ref
Resource offline: All_nonerror_servers               stop_server
      Search on: Sat.Jan.28.18:00:06.KORST.2023.stop_server.All_nonerror_servers.epprd_rg.ref
Releasing resource: All_nfs_mounts                   cl_deactivate_nfs
      Search on: Sat.Jan.28.18:00:07.KORST.2023.cl_deactivate_nfs.All_nfs_mounts.epprd_rg.ref
Resource offline: All_nonerror_nfs_mounts            cl_deactivate_nfs
      Search on: Sat.Jan.28.18:00:11.KORST.2023.cl_deactivate_nfs.All_nonerror_nfs_mounts.epprd_rg.ref
Releasing resource: All_exports                      cl_unexport_fs
      Search on: Sat.Jan.28.18:00:11.KORST.2023.cl_unexport_fs.All_exports.epprd_rg.ref
Resource offline: All_nonerror_exports               cl_unexport_fs
      Search on: Sat.Jan.28.18:00:11.KORST.2023.cl_unexport_fs.All_nonerror_exports.epprd_rg.ref
Releasing resource: All_filesystems                  cl_deactivate_fs
      Search on: Sat.Jan.28.18:00:11.KORST.2023.cl_deactivate_fs.All_filesystems.epprd_rg.ref
Resource offline: All_non_error_filesystems          cl_deactivate_fs
      Search on: Sat.Jan.28.18:00:15.KORST.2023.cl_deactivate_fs.All_non_error_filesystems.epprd_rg.ref
Releasing resource: All_volume_groups                cl_deactivate_vgs
      Search on: Sat.Jan.28.18:00:15.KORST.2023.cl_deactivate_vgs.All_volume_groups.epprd_rg.ref
Resource offline: All_nonerror_volume_groups         cl_deactivate_vgs
      Search on: Sat.Jan.28.18:00:16.KORST.2023.cl_deactivate_vgs.All_nonerror_volume_groups.epprd_rg.ref
Releasing resource: All_service_addrs                release_service_addr
      Search on: Sat.Jan.28.18:00:16.KORST.2023.release_service_addr.All_service_addrs.epprd_rg.ref
Resource offline: All_nonerror_service_addrs         release_service_addr
      Search on: Sat.Jan.28.18:00:17.KORST.2023.release_service_addr.All_nonerror_service_addrs.epprd_rg.ref
Resource group offline: epprd_rg                     process_resources
      Search on: Sat.Jan.28.18:00:19.KORST.2023.process_resources.epprd_rg.ref
----------------------------------------------------------------------------
|EVENT_SUMMARY_START|TE_RG_MOVE_RELEASE|2023-01-28T18:00:05|2023-01-28T18:00:22|22169|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:00:06.KORST.2023.process_resources.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:00:06.KORST.2023.stop_server.All_servers.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:00:06.KORST.2023.stop_server.All_nonerror_servers.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:00:07.KORST.2023.cl_deactivate_nfs.All_nfs_mounts.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:00:11.KORST.2023.cl_deactivate_nfs.All_nonerror_nfs_mounts.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:00:11.KORST.2023.cl_unexport_fs.All_exports.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:00:11.KORST.2023.cl_unexport_fs.All_nonerror_exports.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:00:11.KORST.2023.cl_deactivate_fs.All_filesystems.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:00:15.KORST.2023.cl_deactivate_fs.All_non_error_filesystems.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:00:15.KORST.2023.cl_deactivate_vgs.All_volume_groups.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:00:16.KORST.2023.cl_deactivate_vgs.All_nonerror_volume_groups.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:00:16.KORST.2023.release_service_addr.All_service_addrs.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:00:17.KORST.2023.release_service_addr.All_nonerror_service_addrs.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:00:19.KORST.2023.process_resources.epprd_rg.ref.ref|
|EVENT_SUMMARY_END|

PowerHA SystemMirror Event Preamble
----------------------------------------------------------------------------
Serial number for this event: 22177

No resource state change initiated by the cluster manager as a result of this event
----------------------------------------------------------------------------
|EVENT_PREAMBLE_START|TE_JOIN_NETWORK|2023-01-28T18:00:26|22177|
|EVENT_NO_ACTIONS_QUEUED|
|EVENT_PREAMBLE_END|

Jan 28 2023 18:00:26 EVENT START: network_up epprda net_ether_01 |2023-01-28T18:00:26|22177|EVENT START: network_up epprda net_ether_01| :network_up[+66] version=%I% :network_up[+69] set -a :network_up[+70] cllsparam -n epprda :network_up[+70] eval NODE_NAME=epprda VERBOSE_LOGGING=high PS4='${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] ' DEBUG_LEVEL=Standard LC_ALL='C' :network_up[+70] NODE_NAME=epprda VERBOSE_LOGGING=high PS4=${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] DEBUG_LEVEL=Standard LC_ALL=C :network_up[+71] set +a :network_up[+73] STATUS=0 :network_up[+75] [ 2 -ne 2 ] :network_up[+81] [[ epprda == epprda ]] :network_up[+82] amlog_trace 22177|epprda|net_ether_01 :network_up[+61] clcycle clavailability.log :network_up[+61] 1> /dev/null 2>& 1 :network_up[+61] :network_up[+61] cltime DATE=2023-01-28T18:00:26.950383 :network_up[+61] echo |2023-01-28T18:00:26.950383|INFO: 22177|epprda|net_ether_01 :network_up[+61] 1>> /var/hacmp/availability/clavailability.log :network_up[+84] export NETWORKNAME=net_ether_01 :network_up[+89] [[ epprda == epprda ]] :network_up[+90] amlog_trace 22177|epprda|net_ether_01 :network_up[+61] clcycle clavailability.log :network_up[+61] 1> /dev/null 2>& 1 :network_up[+61] :network_up[+61] cltime DATE=2023-01-28T18:00:26.977266 :network_up[+61] echo |2023-01-28T18:00:26.977266|INFO: 22177|epprda|net_ether_01 :network_up[+61] 1>> /var/hacmp/availability/clavailability.log :network_up[+92] exit 0 Jan 28 2023 18:00:26 EVENT COMPLETED: network_up epprda net_ether_01 0 |2023-01-28T18:00:27|22177|EVENT COMPLETED: network_up epprda net_ether_01 0| Jan 28 2023 18:00:27 EVENT START: network_up_complete epprda net_ether_01 |2023-01-28T18:00:27|22177|EVENT START: network_up_complete epprda net_ether_01| :network_up_complete[+68] version=%I% :network_up_complete[+72] [ 2 -ne 2 ] :network_up_complete[+78] [[ epprda == epprda ]] :network_up_complete[+79] amlog_trace 22177|epprda|net_ether_01 :network_up_complete[+61] clcycle clavailability.log :network_up_complete[+61] 1> /dev/null 2>& 1 :network_up_complete[+61] :network_up_complete[+61] cltime DATE=2023-01-28T18:00:27.237634 :network_up_complete[+61] echo |2023-01-28T18:00:27.237634|INFO: 22177|epprda|net_ether_01 :network_up_complete[+61] 1>> /var/hacmp/availability/clavailability.log :network_up_complete[+82] NODENAME=epprda :network_up_complete[+83] NETWORK=net_ether_01 :network_up_complete[+84] export NETWORKNAME=net_ether_01 :network_up_complete[+86] [[ -z ]] :network_up_complete[+88] EMULATE=REAL :network_up_complete[+90] set -u :network_up_complete[+96] STATUS=0 :network_up_complete[+100] odmget HACMPnode :network_up_complete[+100] grep name =
:network_up_complete[+100] sort :network_up_complete[+100] uniq :network_up_complete[+100] wc -l :network_up_complete[+100] [ 2 -eq 2 ] :network_up_complete[+102] :network_up_complete[+102] odmget HACMPgroup :network_up_complete[+102] grep group = :network_up_complete[+102] awk {print $3} :network_up_complete[+102] sed s/"//g RESOURCE_GROUPS=epprd_rg :network_up_complete[+106] :network_up_complete[+106] odmget -q group=epprd_rg AND name=EXPORT_FILESYSTEM HACMPresource :network_up_complete[+106] grep value :network_up_complete[+106] awk {print $3} :network_up_complete[+106] sed s/"//g EXPORTLIST=/board_org :network_up_complete[+107] [ -n /board_org ] :network_up_complete[+109] [ REAL = EMUL ] :network_up_complete[+114] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+189] TWIN_NAME=epprds :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+196] rpcinfo -p :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw status :cl_update_statd(0)[+207] : Get the current twin, if there is one :cl_update_statd(0)[+209] :cl_update_statd(0)[+209] nfso -H sm_gethost :cl_update_statd(0)[+209] 2>& 1 CURTWIN=epprds :cl_update_statd(0)[+210] RC=0 :cl_update_statd(0)[+212] [[ -z true ]] :cl_update_statd(0)[+212] [[ -z epprds ]] :cl_update_statd(0)[+225] : Get the interface to the twin node :cl_update_statd(0)[+227] :cl_update_statd(0)[+227] get_node_ip epprds :cl_update_statd(0)[+9] (( 1 != 1 )) :cl_update_statd(0)[+15] Twin_Name=epprds :cl_update_statd(0)[+16] NewTwin= :cl_update_statd(0)[+19] : Get the Interface details for every interface on the twin node :cl_update_statd(0)[+20] : Reject interfaces on nodes that are not public boot addresses :cl_update_statd(0)[+21] : because those are the only ones we have state information for :cl_update_statd(0)[+23] :cl_update_statd(0)[+23] cllsif -J ~ -Sw -i epprda :cl_update_statd(0)[+23] LC_ALL=C LOCAL_NETWORK_INFO=epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+24] cllsif -J ~ -Sw -i epprds :cl_update_statd(0)[+24] LC_ALL=C :cl_update_statd(0)[+25] read adapt type network net_type attrib node ip_addr skip interface skip netmask skip skip prefix ip_family :cl_update_statd(0)[+25] IFS=~ :cl_update_statd(0)[+25] [[ public != public ]] :cl_update_statd(0)[+25] [[ boot != boot ]] :cl_update_statd(0)[+33] : Find the state of this candidate :cl_update_statd(0)[+33] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+37] :cl_update_statd(0)[+37] print 61.81.244.123 :cl_update_statd(0)[+37] tr ./ xx addr=i61x81x244x123_epprds :cl_update_statd(0)[+43] eval candidate_state=${i61x81x244x123_epprds:-down} :cl_update_statd(0)[+43] candidate_state=UP :cl_update_statd(0)[+46] : If state is UP, check to see if this node can talk to it :cl_update_statd(0)[+46] [[ UP == UP ]] :cl_update_statd(0)[+50] ping -w 5 -c 1 -q 61.81.244.123 :cl_update_statd(0)[+50] 1> /dev/null :cl_update_statd(0)[+61] echo 
epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+61] tr \n :cl_update_statd(0)[+62] read lcl_adapt lcl_type lcl_network lcl_net_type lcl_attrib lcl_node lcl_ip_addr skip lcl_interface skip lcl_netmask skip skip lcl_prefix lcl_ip_family :cl_update_statd(0)[+62] IFS=~ :cl_update_statd(0)[+62] [[ net_ether_01 != net_ether_01 ]] :cl_update_statd(0)[+62] [[ boot != boot ]] :cl_update_statd(0)[+62] [[ public != public ]] :cl_update_statd(0)[+62] [[ AF_INET != AF_INET ]] :cl_update_statd(0)[+62] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+71] :cl_update_statd(0)[+71] print 61.81.244.134 :cl_update_statd(0)[+71] tr ./ xx addr=i61x81x244x134_epprda :cl_update_statd(0)[+77] eval lcl_candidate_state=${i61x81x244x134_epprda:-down} :cl_update_statd(0)[+77] lcl_candidate_state=UP :cl_update_statd(0)[+77] [[ UP == UP ]] :cl_update_statd(0)[+81] : epprds is on the same network as an interface that is up :cl_update_statd(0)[+82] : on the local node, and the attributes match. :cl_update_statd(0)[+84] NewTwin=epprds :cl_update_statd(0)[+85] break :cl_update_statd(0)[+85] [[ -n epprds ]] :cl_update_statd(0)[+91] break :cl_update_statd(0)[+91] [[ -z epprds ]] :cl_update_statd(0)[+100] echo epprds :cl_update_statd(0)[+101] return 0 NEWTWIN=epprds :cl_update_statd(0)[+227] [[ -z epprds ]] :cl_update_statd(0)[+227] [[ epprds != epprds ]] :cl_update_statd(0)[+259] : RC is actually 0 :cl_update_statd(0)[+266] return 0 :network_up_complete[+115] [ 0 -ne 0 ] :network_up_complete[+120] break :network_up_complete[+125] [[ epprda == epprda ]] :network_up_complete[+131] :network_up_complete[+131] odmget -qname=net_ether_01 HACMPnetwork :network_up_complete[+131] awk $1 == "alias" {print $3} :network_up_complete[+131] sed s/"//g ALIASING=1 :network_up_complete[+131] [[ 1 == 1 ]] :network_up_complete[+133] cl_configure_persistent_address aliasing_network_up -n net_ether_01 :cl_configure_persistent_address[1344] version=1.56.1.4 :cl_configure_persistent_address[1346] cl_get_path -S :cl_configure_persistent_address[1346] OP_SEP='~' :cl_configure_persistent_address[1349] get_local_nodename :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. 
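
A detail worth pulling out of the cl_update_statd scan above: interface state is not queried live. The cluster manager exports one variable per interface, and the script derives the variable's name from the candidate address (tr ./ xx plus a node suffix), evals it with a 'down' default, and only then confirms reachability with ping. Roughly:

    candidate_ip=61.81.244.123
    name=i$(print $candidate_ip | tr ./ xx)_epprds   # -> i61x81x244x123_epprds
    eval candidate_state=\${$name:-down}             # exported as UP by the cluster manager

    if [[ $candidate_state == UP ]] && ping -w 5 -c 1 -q $candidate_ip > /dev/null
    then
        NewTwin=epprds     # a boot address on a shared network that we can reach
    fi
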
:get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :cl_configure_persistent_address[1349] LOCALNODENAME=epprda :cl_configure_persistent_address[1354] [[ -z epprda ]] :cl_configure_persistent_address[1356] NETWORK='' :cl_configure_persistent_address[1357] ALIVE_IF='' :cl_configure_persistent_address[1358] FAILED_IF='' :cl_configure_persistent_address[1359] FAILED_ADDRESS='' :cl_configure_persistent_address[1360] UPDATE_CLSTRMGR=1 :cl_configure_persistent_address[1361] CHECK_HA_ALIVE=1 :cl_configure_persistent_address[1362] RESTORE_ROUTES=/usr/es/sbin/cluster/.pers_restore_routes :cl_configure_persistent_address[1363] RC=0 :cl_configure_persistent_address[1364] B_FLAG=0 :cl_configure_persistent_address[1366] ACTION=aliasing_network_up :cl_configure_persistent_address[1367] shift :cl_configure_persistent_address[1369] getopt n:a:f:i:dPB -n net_ether_01 :cl_configure_persistent_address[1369] set -- -n net_ether_01 -- :cl_configure_persistent_address[1371] (( 0 != 0 )) :cl_configure_persistent_address[1371] [[ -z aliasing_network_up ]] :cl_configure_persistent_address[1376] [[ -n != -- ]] :cl_configure_persistent_address[1379] NETWORK=net_ether_01 :cl_configure_persistent_address[1380] shift :cl_configure_persistent_address[1380] shift :cl_configure_persistent_address[1376] [[ -- != -- ]] :cl_configure_persistent_address[1418] shift :cl_configure_persistent_address[1422] [[ aliasing_network_up == up ]] :cl_configure_persistent_address[1520] [[ aliasing_network_up == swap ]] :cl_configure_persistent_address[1667] [[ aliasing_network_up == fail_boot ]] :cl_configure_persistent_address[1830] [[ aliasing_network_up == aliasing_network_up ]] :cl_configure_persistent_address[1831] [[ -z net_ether_01 ]] :cl_configure_persistent_address[1837] isAliasingNetwork net_ether_01 :cl_configure_persistent_address[isAliasingNetwork:386] PS4_FUNC=isAliasingNetwork :cl_configure_persistent_address[isAliasingNetwork:386] typeset PS4_FUNC :cl_configure_persistent_address[isAliasingNetwork:387] [[ high == high ]] :cl_configure_persistent_address[isAliasingNetwork:387] set -x :cl_configure_persistent_address[isAliasingNetwork:389] NETWORK=net_ether_01 :cl_configure_persistent_address[isAliasingNetwork:391] odmget -qname=net_ether_01 HACMPnetwork :cl_configure_persistent_address[isAliasingNetwork:392] awk '$1 == "alias" {print $3}' :cl_configure_persistent_address[isAliasingNetwork:393] sed 's/"//g' :cl_configure_persistent_address[isAliasingNetwork:391] print 1 :cl_configure_persistent_address[1837] [[ 1 != 1 ]] :cl_configure_persistent_address[1842] cllsif -J '~' -Spi epprda :cl_configure_persistent_address[1842] awk -F~ '$2 == "persistent" && $3 == "net_ether_01" {print $1}' :cl_configure_persistent_address[1842] PERSISTENT='' :cl_configure_persistent_address[1844] [[ -z '' ]] :cl_configure_persistent_address[1846] exit 0 :network_up_complete[+141] :network_up_complete[+141] cl_rrmethods2call net_initialization :cl_rrmethods2call[56] version=%I% :cl_rrmethods2call[84] RRMETHODS='' :cl_rrmethods2call[85] NEED_RR_ENV_VARS=no :cl_rrmethods2call[90] : The network methods are returned if the Network type is XD_data. 
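
Nearly every branch in this log is decided by a clodmget lookup against the HACMP ODM classes (HACMPnetwork on the next line; HACMPresource and HACMPgroup during the volume-group work further down). Two lookups taken verbatim from this trace, wrapped here as hypothetical helpers just to show the -q/-f/-n pattern (query, field, suppress descriptor names):

    # Which resource group owns a volume group? (as used by cl_update_vg_odm_ts below)
    rg_for_vg()
    {
        clodmget -q "name like *VOLUME_GROUP and value = $1" -f group -n HACMPresource
    }

    # Which nodes participate in a resource group?
    nodes_for_rg()
    {
        clodmget -q "group = $1" -f nodes -n HACMPgroup
    }

    rg=$(rg_for_vg datavg)        # -> epprd_rg
    nodes=$(nodes_for_rg $rg)     # -> epprda epprds
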
:cl_rrmethods2call[92] clodmget -qname=net_ether_01 -f nimname -n HACMPnetwork :cl_rrmethods2call[92] RRNET=ether :cl_rrmethods2call[94] [[ ether == XD_data ]] :cl_rrmethods2call[98] return 0 METHODS= :network_up_complete[+163] :network_up_complete[+163] clodmget -n -q name=MOUNT_FILESYSTEM -f group HACMPresource CROSSMOUNTS=epprd_rg :network_up_complete[+165] [ -n epprd_rg -a epprda = epprda ] :network_up_complete[+168] : Remount any NFS cross mount if required :network_up_complete[+174] :network_up_complete[+174] clodmget -n -f group HACMPgroup RESOURCE_GROUPS=epprd_rg :network_up_complete[+185] :network_up_complete[+185] clodmget -n -q name=MOUNT_FILESYSTEM and group=epprd_rg -f value HACMPresource MOUNT_FILESYSTEM=/board;/board_org :network_up_complete[+185] [[ -z /board;/board_org ]] :network_up_complete[+189] IN_RG=false :network_up_complete[+189] clodmget -n -q group=epprd_rg -f nodes HACMPgroup :network_up_complete[+189] [[ epprda == epprda ]] :network_up_complete[+192] IN_RG=true :network_up_complete[+192] [[ epprds == epprda ]] :network_up_complete[+192] [[ true == false ]] :network_up_complete[+197] :network_up_complete[+197] clRGinfo -s epprd_rg :network_up_complete[+197] awk -F : { if ( $2 == "ONLINE" ) print $3 } clRGinfo[431]: version I clRGinfo[517]: Number of resource groups = 1 clRGinfo[562]: cluster epprda_cluster is version = 22 clRGinfo[1439]: IPC target host name is 'localhost' clRGinfo[685]: Current group is 'epprd_rg' get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_for_short_output_table get primary state info for state 4 get secondary state info for state 4 getPreviousStateString: Primary=4, Sec=-1 get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_for_short_output_table get primary state info for state 4 get secondary state info for state 4 getPreviousStateString: Primary=4, Sec=-1 NFS_HOST= :network_up_complete[+197] [[ -z ]] :network_up_complete[+198] continue :network_up_complete[+257] [[ epprda == epprda ]] :network_up_complete[+257] [[ 0 -ne 0 ]] :network_up_complete[+262] amlog_trace 22177|epprda|net_ether_01 :network_up_complete[+61] clcycle clavailability.log :network_up_complete[+61] 1> /dev/null 2>& 1 :network_up_complete[+61] :network_up_complete[+61] cltime DATE=2023-01-28T18:00:27.434191 :network_up_complete[+61] echo |2023-01-28T18:00:27.434191|INFO: 22177|epprda|net_ether_01 :network_up_complete[+61] 1>> /var/hacmp/availability/clavailability.log :network_up_complete[+265] exit 0 Jan 28 2023 18:00:27 EVENT COMPLETED: network_up_complete epprda net_ether_01 0 |2023-01-28T18:00:27|22177|EVENT COMPLETED: network_up_complete epprda net_ether_01 0| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 22170 No resource state change initiated by the cluster manager as a result of this event ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_FAIL_NODE_DEP_COMPLETE|2023-01-28T18:00:29|22170| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Jan 28 2023 18:00:29 EVENT START: node_down_complete epprda |2023-01-28T18:00:29|22170|EVENT START: node_down_complete epprda| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:00:29.686321 + echo '|2023-01-28T18:00:29.686321|INFO: node_down_complete|epprda' + 1>> 
/var/hacmp/availability/clavailability.log :node_down_complete[107] version=%I% :node_down_complete[111] : Pick up input :node_down_complete[113] NODENAME=epprda :node_down_complete[113] export NODENAME :node_down_complete[114] PARAM='' :node_down_complete[114] export PARAM :node_down_complete[116] NODE_HALT_CONTROL_FILE=/usr/es/sbin/cluster/etc/ha_nodehalt.lock :node_down_complete[125] STATUS=0 :node_down_complete[127] set -u :node_down_complete[129] (( 1 < 1 )) :node_down_complete[136] : serial number for this event is 22170 :node_down_complete[139] [[ '' == forced ]] :node_down_complete[151] : if RG_DEPENDENCIES is set to false by the cluster manager, :node_down_complete[152] : then resource groups will be processed via clsetenvgrp :node_down_complete[154] [[ '' != forced ]] :node_down_complete[154] [[ TRUE == FALSE ]] :node_down_complete[184] : For each participating resource group, serially process the resources :node_down_complete[186] LOCALCOMP=N :node_down_complete[189] : if RG_DEPENDENCIES is set to false by the cluster manager, :node_down_complete[190] : then resource groups will be processed via clsetenvgrp :node_down_complete[192] [[ '' != forced ]] :node_down_complete[192] [[ TRUE == FALSE ]] :node_down_complete[232] [[ '' != forced ]] :node_down_complete[232] [[ epprda == epprda ]] :node_down_complete[235] : Call ss-unload replicated resource methods if they are defined :node_down_complete[237] cl_rrmethods2call ss_unload :cl_rrmethods2call[56] version=%I% :cl_rrmethods2call[84] RRMETHODS='' :cl_rrmethods2call[85] NEED_RR_ENV_VARS=no :cl_rrmethods2call[104] : The load and unload methods if defined are returned on the :cl_rrmethods2call[105] : local node :cl_rrmethods2call[107] [[ epprda == epprda ]] :cl_rrmethods2call[109] NEED_RR_ENV_VARS=yes :cl_rrmethods2call[129] : Set the '*_REP_RESOURCE' variables if needed. 
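
The cllsres call traced next uses the same eval convention as clRGPA: the utility prints the group's resources as KEY='value' pairs and the caller evals them straight into shell variables, which is how VOLUME_GROUP, SERVICE_LABEL and the rest materialize below. In short:

    eval $(cllsres 2> /dev/null)   # VOLUME_GROUP="datavg" SERVICE_LABEL="epprd" ...
    print "varying off: $VOLUME_GROUP (exports: $EXPORT_FILESYSTEM)"
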
:cl_rrmethods2call[131] [[ yes == yes ]] :cl_rrmethods2call[133] cllsres :cl_rrmethods2call[133] 2> /dev/null :cl_rrmethods2call[133] eval APPLICATIONS='"epprd_app"' EXPORT_FILESYSTEM='"/board_org"' FILESYSTEM='""' FORCED_VARYON='"false"' FSCHECK_TOOL='"fsck"' FS_BEFORE_IPADDR='"false"' MOUNT_FILESYSTEM='"/board;/board_org"' RECOVERY_METHOD='"sequential"' SERVICE_LABEL='"epprd"' SSA_DISK_FENCING='"false"' VG_AUTO_IMPORT='"false"' VOLUME_GROUP='"datavg"' USERDEFINED_RESOURCES='""' :cl_rrmethods2call[1] APPLICATIONS=epprd_app :cl_rrmethods2call[1] EXPORT_FILESYSTEM=/board_org :cl_rrmethods2call[1] FILESYSTEM='' :cl_rrmethods2call[1] FORCED_VARYON=false :cl_rrmethods2call[1] FSCHECK_TOOL=fsck :cl_rrmethods2call[1] FS_BEFORE_IPADDR=false :cl_rrmethods2call[1] MOUNT_FILESYSTEM='/board;/board_org' :cl_rrmethods2call[1] RECOVERY_METHOD=sequential :cl_rrmethods2call[1] SERVICE_LABEL=epprd :cl_rrmethods2call[1] SSA_DISK_FENCING=false :cl_rrmethods2call[1] VG_AUTO_IMPORT=false :cl_rrmethods2call[1] VOLUME_GROUP=datavg :cl_rrmethods2call[1] USERDEFINED_RESOURCES='' :cl_rrmethods2call[137] [[ -n '' ]] :cl_rrmethods2call[142] [[ -n '' ]] :cl_rrmethods2call[147] [[ -n '' ]] :cl_rrmethods2call[152] [[ -n '' ]] :cl_rrmethods2call[157] [[ -n '' ]] :cl_rrmethods2call[162] [[ -n '' ]] :cl_rrmethods2call[167] [[ -n '' ]] :cl_rrmethods2call[172] [[ -n '' ]] :cl_rrmethods2call[182] [[ -z '' ]] :cl_rrmethods2call[184] typeset sysmgdata :cl_rrmethods2call[185] typeset reposmgdata :cl_rrmethods2call[186] [[ -x /usr/es/sbin/cluster/xd_generic/xd_cli/clxd_list_mg_smit ]] :cl_rrmethods2call[191] [[ -n '' ]] :cl_rrmethods2call[191] [[ -n '' ]] :cl_rrmethods2call[197] echo '' :cl_rrmethods2call[199] return 0 :node_down_complete[237] METHODS='' :node_down_complete[251] : If dependencies are configured and node is being forced down then :node_down_complete[252] : no need to do varyoff for any passive mode VGs :node_down_complete[254] [[ TRUE == TRUE ]] :node_down_complete[257] : If any volume groups were varied on in passive mode when this node :node_down_complete[258] : came up, all the prior resource group processing would have left them :node_down_complete[259] : in passive mode. Completely vary them off at this point. :node_down_complete[261] lsvg -L :node_down_complete[261] lsvg -L -o :node_down_complete[261] paste -s '-d|' - :node_down_complete[261] grep -w -v -x -E 'caavg_private|rootvg' :node_down_complete[261] INACTIVE_VGS=datavg :node_down_complete[264] lsvg -L datavg :node_down_complete[264] 2> /dev/null :node_down_complete[264] grep -i -q passive-only :node_down_complete[267] : Reset any read only fence height prior to vary off :node_down_complete[269] cl_set_vg_fence_height -c datavg rw cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0)) :node_down_complete[270] RC=0 :node_down_complete[271] (( 0 != 0 )) :node_down_complete[282] : 'lsvg ' will show if a volume group is varied :node_down_complete[283] : on in passive mode. 
Any such are varied off :node_down_complete[285] cltime 2023-01-28T18:00:29.803254 :node_down_complete[286] varyoffvg datavg :node_down_complete[287] RC=0 :node_down_complete[288] cltime 2023-01-28T18:00:29.931843 :node_down_complete[289] : rc_varyoffvg = 0 :node_down_complete[291] : Force a timestamp update to get timestamps in sync :node_down_complete[292] : since timing may prevent LVM from doing so :node_down_complete[294] cl_update_vg_odm_ts -o -f datavg :cl_update_vg_odm_ts(0.000)[77] version=1.13 :cl_update_vg_odm_ts(0.000)[121] o_flag='' :cl_update_vg_odm_ts(0.000)[122] f_flag='' :cl_update_vg_odm_ts(0.000)[123] getopts :of option :cl_update_vg_odm_ts(0.000)[126] : Local timestamps should be good, since volume group was :cl_update_vg_odm_ts(0.001)[127] : just varyied on or off :cl_update_vg_odm_ts(0.001)[128] o_flag=TRUE :cl_update_vg_odm_ts(0.001)[123] getopts :of option :cl_update_vg_odm_ts(0.001)[131] : Update timestamps clusterwide, even if LVM support is in :cl_update_vg_odm_ts(0.001)[132] : place :cl_update_vg_odm_ts(0.001)[133] f_flag=TRUE :cl_update_vg_odm_ts(0.001)[123] getopts :of option :cl_update_vg_odm_ts(0.001)[142] shift 2 :cl_update_vg_odm_ts(0.001)[144] vg_name=datavg :cl_update_vg_odm_ts(0.001)[145] [[ -z datavg ]] :cl_update_vg_odm_ts(0.001)[151] shift :cl_update_vg_odm_ts(0.001)[152] node_list='' :cl_update_vg_odm_ts(0.001)[153] /usr/es/sbin/cluster/utilities/cl_get_path all :cl_update_vg_odm_ts(0.004)[153] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin :cl_update_vg_odm_ts(0.004)[155] [[ -z TRUE ]] :cl_update_vg_odm_ts(0.004)[214] found_new_ts='' :cl_update_vg_odm_ts(0.004)[217] : Try to update the volume group ODM time stamp on every other node :cl_update_vg_odm_ts(0.004)[218] : in the resource group that owns datavg :cl_update_vg_odm_ts(0.004)[220] [[ -z '' ]] :cl_update_vg_odm_ts(0.004)[223] : We were not given a node list. The node list is derived from :cl_update_vg_odm_ts(0.004)[224] : the resource group that the volume group is in. 
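
Condensed, the passive-mode varyoff that node_down_complete performs here is a four-step bracket around the volume group (fence open, vary off, timestamp sync, fence closed); this simplified rendering drops the lsvg passive-only filtering shown above:

    vg=datavg
    cl_set_vg_fence_height -c $vg rw    # lift the fence so varyoffvg can write the VGDA
    varyoffvg $vg
    cl_update_vg_odm_ts -o -f $vg       # force ODM timestamps into sync clusterwide
    cl_set_vg_fence_height -c $vg ro    # node is down: back to read-only
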
:cl_update_vg_odm_ts(0.004)[226] /usr/es/sbin/cluster/utilities/clodmget -q 'name like *VOLUME_GROUP and value = datavg' -f group -n HACMPresource :cl_update_vg_odm_ts(0.007)[226] group_name=epprd_rg :cl_update_vg_odm_ts(0.007)[227] [[ -n epprd_rg ]] :cl_update_vg_odm_ts(0.007)[230] : Find all other cluster nodes in the resource group that owns :cl_update_vg_odm_ts(0.007)[231] : the volume group datavg :cl_update_vg_odm_ts(0.007)[233] /usr/es/sbin/cluster/utilities/clodmget -q 'group = epprd_rg' -f nodes -n HACMPgroup :cl_update_vg_odm_ts(0.009)[233] node_list='epprda epprds' :cl_update_vg_odm_ts(0.009)[238] : Check to see if the volume group is known locally :cl_update_vg_odm_ts(0.009)[240] odmget -q 'name = datavg and PdDvLn = logical_volume/vgsubclass/vgtype' CuDv :cl_update_vg_odm_ts(0.011)[240] [[ -z $'\nCuDv:\n\tname = "datavg"\n\tstatus = 1\n\tchgstatus = 1\n\tddins = ""\n\tlocation = ""\n\tparent = ""\n\tconnwhere = ""\n\tPdDvLn = "logical_volume/vgsubclass/vgtype"' ]] :cl_update_vg_odm_ts(0.011)[272] : Get the vgid for volume group datavg :cl_update_vg_odm_ts(0.012)[274] getlvodm -v datavg :cl_update_vg_odm_ts(0.014)[274] vgid=00c44af100004b00000001851e9dc053 :cl_update_vg_odm_ts(0.014)[280] : Get the volume group timestamp for datavg :cl_update_vg_odm_ts(0.014)[281] : as currently saved in ODM :cl_update_vg_odm_ts(0.014)[283] getlvodm -T 00c44af100004b00000001851e9dc053 :cl_update_vg_odm_ts(0.016)[283] current_odm_ts=63d4e41f29287594 :cl_update_vg_odm_ts(0.017)[288] [[ TRUE != TRUE ]] :cl_update_vg_odm_ts(0.017)[346] : Is an update 'necessary?' :cl_update_vg_odm_ts(0.017)[348] [[ -n 'epprda epprds' ]] :cl_update_vg_odm_ts(0.017)[350] LOCALNODENAME=epprda :cl_update_vg_odm_ts(0.017)[351] LOCALNODENAME=epprda :cl_update_vg_odm_ts(0.017)[352] [[ -n epprda ]] :cl_update_vg_odm_ts(0.017)[355] : Skip the local node, since we have done that above. :cl_update_vg_odm_ts(0.018)[357] print 'epprda epprds' :cl_update_vg_odm_ts(0.020)[357] tr ' ' '\n' :cl_update_vg_odm_ts(0.021)[357] tr , '\n' :cl_update_vg_odm_ts(0.023)[357] grep -v -w -x epprda :cl_update_vg_odm_ts(0.024)[357] paste -s -d, - :cl_update_vg_odm_ts(0.026)[357] node_list=epprds :cl_update_vg_odm_ts(0.027)[365] : Update the time stamp on all those other nodes on which the :cl_update_vg_odm_ts(0.027)[366] : volume group is currently varied off. LVM will take care of :cl_update_vg_odm_ts(0.027)[367] : the others. 
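
The remote half of that timestamp sync, issued next through cl_on_node, stamps the locally read timestamp into each peer's ODM. Assembled from the values in this trace:

    vgid=$(getlvodm -v datavg)    # 00c44af100004b00000001851e9dc053
    ts=$(getlvodm -T $vgid)       # 63d4e41f29287594

    # On each other node of the owning resource group (here just epprds):
    cl_on_node -cspoc "-f -n epprds" \
        "lsvg -o | grep -qx datavg || /usr/sbin/putlvodm -T $ts $vgid && /usr/sbin/savebase > /dev/null"

Read left to right, the remote string runs putlvodm only where the volume group is not currently varied on; LVM keeps the varied-on copies current by itself.
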
:cl_update_vg_odm_ts(0.027)[369] [[ -n epprds ]] :cl_update_vg_odm_ts(0.027)[371] cl_on_node -cspoc '-f -n epprds' 'lsvg -o | grep -qx datavg || /usr/sbin/putlvodm -T 63d4e41f29287594 00c44af100004b00000001851e9dc053 && /usr/sbin/savebase > /dev/null' :cl_update_vg_odm_ts(0.027)[371] _CSPOC_CALLED_FROM_SMIT=true clhaver[576]: version 1.14 clhaver[591]: colon delimied output clhaver[612]: MINVER=6100 clhaver[624]: thread(epprds) clhaver[144]: cl_gethostbynode epprds cl_gethostbynode[102]: version 1.1 i_flag=0 given name is epprds cl_gethostbynode[127]: cl_query nodes=2 cl_gethostbynode[161]: epprds is a PowerHA node name cl_gethostbynode[313]: epprds is the CAA host matching PowerHA node epprds clhaver[157]: node epprds resolves to epprds clhaver[166]: cl_socket(COLLVER epprds epprds) clhaver[191]: cl_connect(epprds) clhaver[230]: read(epprds) epprds: :cl_rsh[99] version=1.4 epprds: :cl_rsh[102] CAA_node_name='' epprds: :cl_rsh[105] : Process optional flags epprds: :cl_rsh[107] cmd_flag=-n epprds: :cl_rsh[108] [[ -n == -n ]] epprds: :cl_rsh[111] : Remove the no standard input flag epprds: :cl_rsh[113] shift epprds: :cl_rsh[124] : Pick up and check the input epprds: :cl_rsh[126] print 'epprds /usr/es/sbin/cluster/cspoc/cexec eval gmhdhgghcacngpcahmcaghhcgfhacacnhbhicagegbhegbhgghcahmhmcacphfhdhccphdgcgjgocphahfhegmhggpgegncacnfecadgddgedegfdedbggdcdjdcdidhdfdjdecadadagddedegbggdbdadadadadegcdadadadadadadadbdidfdbgfdjgegddadfddcacgcgcacphfhdhccphdgcgjgocphdgbhggfgcgbhdgfcadocacpgegfhgcpgohfgmgm' epprds: :cl_rsh[126] read destination command epprds: :cl_rsh[127] [[ -z epprds ]] epprds: :cl_rsh[127] [[ -z '/usr/es/sbin/cluster/cspoc/cexec eval gmhdhgghcacngpcahmcaghhcgfhacacnhbhicagegbhegbhgghcahmhmcacphfhdhccphdgcgjgocphahfhegmhggpgegncacnfecadgddgedegfdedbggdcdjdcdidhdfdjdecadadagddedegbggdbdadadadadegcdadadadadadadadbdidfdbgfdjgegddadfddcacgcgcacphfhdhccphdgcgjgocphdgbhggfgcgbhdgfcadocacpgegfhgcpgohfgmgm' ]] epprds: :cl_rsh[136] /usr/es/sbin/cluster/utilities/cl_nn2hn epprds epprds: :cl_nn2hn[83] version=1.11 epprds: :cl_nn2hn[86] CAA_host_name='' epprds: :cl_nn2hn[86] typeset CAA_host_name epprds: :cl_nn2hn[87] node_name='' epprds: :cl_nn2hn[87] typeset node_name epprds: :cl_nn2hn[88] node_interfaces='' epprds: :cl_nn2hn[88] typeset node_interfaces epprds: :cl_nn2hn[89] COMM_PATH='' epprds: :cl_nn2hn[89] typeset COMM_PATH epprds: :cl_nn2hn[90] r_flag='' epprds: :cl_nn2hn[90] typeset r_flag epprds: :cl_nn2hn[93] : Pick up and check the input epprds: :cl_nn2hn[95] getopts r option epprds: :cl_nn2hn[106] : Pick up the destination, which follows the options epprds: :cl_nn2hn[108] shift 0 epprds: :cl_nn2hn[109] destination=epprds epprds: :cl_nn2hn[109] typeset destination epprds: :cl_nn2hn[111] [[ -z epprds ]] epprds: :cl_nn2hn[121] : In order to prevent recursion, first you must prevent recursion... epprds: :cl_nn2hn[123] [[ '' != TRUE ]] epprds: :cl_nn2hn[126] : This routine is not being called from cl_query_hn_id, so call it epprds: :cl_nn2hn[127] : to see if it can find the CAA host name based on a common short epprds: :cl_nn2hn[128] : id, or match on CAA host name, or match on CAA short name, or epprds: :cl_nn2hn[129] : similar match in /etc/cluster/rhosts. 
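
The epprds-prefixed lines that follow show how that command actually travels: cl_rsh first resolves the PowerHA node name to a CAA host name (cl_nn2hn, which consults cl_query_hn_id and falls back through short-name and /etc/cluster/rhosts matches), then hands the payload to clcomd via clrsh. The skeleton, with remote_cmd standing in for the encoded cexec payload (error handling omitted):

    destination=epprds
    CAA_node_name=$(/usr/es/sbin/cluster/utilities/cl_nn2hn $destination)
    /usr/sbin/clrsh $CAA_node_name -n "$remote_cmd"    # clcomd delivers it on the peer
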
epprds: :cl_nn2hn[131] cl_query_hn_id -q -i epprds epprds: cl_query_hn_id[137]: version 1.2 epprds: cl_gethostbynode[102]: version 1.1 i_flag=105 given name is epprds epprds: cl_gethostbynode[127]: cl_query nodes=2 epprds: cl_gethostbynode[161]: epprds is a PowerHA node name epprds: cl_gethostbynode[313]: epprds is the CAA host matching PowerHA node epprds epprds: :cl_nn2hn[131] CAA_host_name=epprds epprds: :cl_nn2hn[132] RC=0 epprds: :cl_nn2hn[133] (( 0 == 0 )) epprds: :cl_nn2hn[136] : The straight forward tests worked! epprds: :cl_nn2hn[138] [[ epprds == @(+([0-9.])|+([0-9:])) ]] epprds: :cl_nn2hn[159] [[ -z epprds ]] epprds: :cl_nn2hn[340] [[ -z epprds ]] epprds: :cl_nn2hn[345] [[ -n epprds ]] epprds: :cl_nn2hn[348] : We have found epprds is our best guess at a CAA host name epprds: :cl_nn2hn[349] : corresponding to epprds epprds: :cl_nn2hn[351] print epprds epprds: :cl_nn2hn[352] return 0 epprds: :cl_rsh[136] CAA_node_name=epprds epprds: :cl_rsh[148] : Invoke clcomd epprds: :cl_rsh[150] /usr/sbin/clrsh epprds -n '/usr/es/sbin/cluster/cspoc/cexec eval gmhdhgghcacngpcahmcaghhcgfhacacnhbhicagegbhegbhgghcahmhmcacphfhdhccphdgcgjgocphahfhegmhggpgegncacnfecadgddgedegfdedbggdcdjdcdidhdfdjdecadadagddedegbggdbdadadadadegcdadadadadadadadbdidfdbgfdjgegddadfddcacgcgcacphfhdhccphdgcgjgocphdgbhggfgcgbhdgfcadocacpgegfhgcpgohfgmgm' epprds: :cl_rsh[151] return 0 :cl_update_vg_odm_ts(0.504)[375] return 0 :node_down_complete[297] : If VG fencing is in place, restore the fence height to read/only. :node_down_complete[299] cl_set_vg_fence_height -c datavg ro cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=ro(2)) :node_down_complete[300] RC=0 :node_down_complete[301] : return code from volume group fencing is 0 :node_down_complete[302] (( 0 != 0 )) :node_down_complete[315] : remove the flag file used to indicate reconfig_resources :node_down_complete[317] rm -f /usr/es/sbin/cluster/etc/.hacmp_wlm_config_changed :node_down_complete[320] : Run WLM stop script :node_down_complete[322] cl_wlm_stop :cl_wlm_stop[+55] version=%I% :cl_wlm_stop[+59] :cl_wlm_stop[+59] clwlmruntime -l :cl_wlm_stop[+59] awk BEGIN { FS = ":" } $1 !~ /^#.*/ { print $1 } HA_WLM_CONFIG=HA_WLM_config :cl_wlm_stop[+60] [[ -z HA_WLM_config ]] :cl_wlm_stop[+69] wlmcntrl -q WLM is stopped :cl_wlm_stop[+70] WLM_IS_RUNNING=1 :cl_wlm_stop[+72] WLM_CONFIG_FILES=classes shares limits rules :cl_wlm_stop[+74] PREV_WLM_CONFIG= :cl_wlm_stop[+76] HA_STARTED_WLM=false :cl_wlm_stop[+78] [[ -e /etc/wlm/HA_WLM_config/HA_prev_config_subdir ]] :cl_wlm_stop[+86] [[ -e /etc/wlm/HA_WLM_config/classes.prev ]] :cl_wlm_stop[+86] [[ -e /etc/wlm/HA_WLM_config/shares.prev ]] :cl_wlm_stop[+86] [[ -e /etc/wlm/HA_WLM_config/limits.prev ]] :cl_wlm_stop[+86] [[ -e /etc/wlm/HA_WLM_config/rules.prev ]] :cl_wlm_stop[+107] [[ -n ]] :cl_wlm_stop[+107] [[ true = false ]] :cl_wlm_stop[+144] exit 0 :node_down_complete[330] [[ epprda == epprda ]] :node_down_complete[333] : Node is down: Create the lock file that inhibits node halt :node_down_complete[335] /bin/touch /usr/es/sbin/cluster/etc/ha_nodehalt.lock :node_down_complete[339] : If this is the last node to leave, restore read 
write access to all volume groups :node_down_complete[341] [[ '' != forced ]] :node_down_complete[343] [[ -z epprds ]] :node_down_complete[392] [[ epprda == epprda ]] :node_down_complete[395] : Node is gracefully going down. :node_down_complete[397] clodmget -n -q policy=scsi -f value HACMPsplitmerge :node_down_complete[397] SCSIPR_ENABLED='' :node_down_complete[397] typeset SCSIPR_ENABLED :node_down_complete[398] [[ '' == Yes ]] :node_down_complete[452] : refresh clcomd, FWIW :node_down_complete[454] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. :node_down_complete[459] : This is the final info of all RGs: :node_down_complete[461] clRGinfo -p -t :node_down_complete[461] 2>& 1 clRGinfo[431]: version I clRGinfo[517]: Number of resource groups = 0 clRGinfo[562]: cluster epprda_cluster is version = 22 clRGinfo[597]: no resource groups specified on command line - print all clRGinfo[685]: Current group is 'epprd_rg' get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_table get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_table Cluster Name: epprda_cluster Resource Group Name: epprd_rg Node Group State Delayed Timers ---------------------------------------------------------------- --------------- ------------------- epprda OFFLINE epprds OFFLINE :node_down_complete[463] return 0 Jan 28 2023 18:00:30 EVENT COMPLETED: node_down_complete epprda 0 |2023-01-28T18:00:30|22170|EVENT COMPLETED: node_down_complete epprda 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:00:30.579460 + echo '|2023-01-28T18:00:30.579460|INFO: node_down_complete|epprda|0' + 1>> /var/hacmp/availability/clavailability.log clexit.rc : Normal termination of clstrmgrES. Restart now. 0513-059 The clstrmgrES Subsystem has been started. Subsystem PID is 26607896. Jan 28 2023 18:03:26 EVENT START: admin_op clrm_start_request 8559 0 |2023-01-28T18:03:26|8559|EVENT START: admin_op clrm_start_request 8559 0| :admin_op[110] trap sigint_handler INT :admin_op[116] OP_TYPE=clrm_start_request :admin_op[116] typeset OP_TYPE :admin_op[117] SERIAL=8559 :admin_op[117] typeset -li SERIAL :admin_op[118] INVALID=0 :admin_op[118] typeset -li INVALID The administrator initiated the following action at Sat Jan 28 18:03:26 KORST 2023 Check smit.log and clutils.log for additional details. Starting PowerHA cluster services on node: epprda in normal mode... Jan 28 2023 18:03:29 EVENT COMPLETED: admin_op clrm_start_request 8559 0 0 |2023-01-28T18:03:29|8559|EVENT COMPLETED: admin_op clrm_start_request 8559 0 0| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 8560 Cluster services started on node 'epprda' Enqueued rg_move acquire event for resource group epprd_rg. Node Up Completion Event has been enqueued. 
---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_JOIN_NODE_DEP|2023-01-28T18:03:31|8560| |CLUSTER_RG_MOVE_ACQUIRE|epprd_rg| |NODE_UP_COMPLETE| |EVENT_PREAMBLE_END| Jan 28 2023 18:03:34 EVENT START: node_up epprda |2023-01-28T18:03:34|8560|EVENT START: node_up epprda| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:03:34.209427 + echo '|2023-01-28T18:03:34.209427|INFO: node_up|epprda' + 1>> /var/hacmp/availability/clavailability.log :node_up[182] version=%I% :node_up[185] NODENAME=epprda :node_up[185] export NODENAME :node_up[193] STATUS=0 :node_up[193] typeset -li STATUS :node_up[194] RC=0 :node_up[194] typeset -li RC :node_up[195] ENABLE_NFS_CROSS_MOUNT=false :node_up[196] START_MODE='' :node_up[196] typeset START_MODE :node_up[198] set -u :node_up[200] (( 1 < 1 )) :node_up[200] (( 1 > 2 )) :node_up[207] : serial number for this event is 8560 :node_up[210] [[ epprda == epprda ]] :node_up[213] : Remove the node halt lock file. :node_up[214] : Hereafter, clstrmgr failure leads to node halt :node_up[216] rm -f /usr/es/sbin/cluster/etc/ha_nodehalt.lock :node_up[219] (( 1 > 1 )) :node_up[256] : If RG_DEPENDENCIES=false, process RGs with clsetenvgrp :node_up[258] [[ TRUE == FALSE ]] :node_up[281] : localnode processing prior to RG acquisition :node_up[283] [[ epprda == epprda ]] :node_up[283] [[ '' != forced ]] :node_up[286] : Reserve Volume Groups using SCSIPR :node_up[288] clodmget -n -q policy=scsi -f value HACMPsplitmerge :node_up[288] SCSIPR_ENABLED='' :node_up[288] typeset SCSIPR_ENABLED :node_up[289] [[ '' == Yes ]] :node_up[334] : Setup VG fencing. This must be done prior to any potential disk access. :node_up[336] node_up_vg_fence_init :node_up[node_up_vg_fence_init:73] typeset VGs_on_line :node_up[node_up_vg_fence_init:74] typeset VG_name :node_up[node_up_vg_fence_init:75] typeset VG_ID :node_up[node_up_vg_fence_init:76] typeset VG_PV_list :node_up[node_up_vg_fence_init:79] : Find out what volume groups are currently on-line :node_up[node_up_vg_fence_init:81] lsvg -L -o :node_up[node_up_vg_fence_init:81] 2> /var/hacmp/log/node_up.lsvg.err :node_up[node_up_vg_fence_init:81] print caavg_private rootvg :node_up[node_up_vg_fence_init:81] VGs_on_line='caavg_private rootvg' :node_up[node_up_vg_fence_init:82] [[ -e /var/hacmp/log/node_up.lsvg.err ]] :node_up[node_up_vg_fence_init:82] [[ ! -s /var/hacmp/log/node_up.lsvg.err ]] :node_up[node_up_vg_fence_init:82] rm /var/hacmp/log/node_up.lsvg.err :node_up[node_up_vg_fence_init:85] : Clean up any old fence group files and stale fence groups. 
:node_up[node_up_vg_fence_init:86] : These are all of the form '/usr/es/sbin/cluster/etc/vg/<vg>.uuid' :node_up[node_up_vg_fence_init:88] valid_vg_lst='' :node_up[node_up_vg_fence_init:89] lsvg -L :node_up[node_up_vg_fence_init:89] egrep -vw 'rootvg|caavg_private' :node_up[node_up_vg_fence_init:89] 2>> /var/hacmp/log/node_up.lsvg.err :node_up:datavg[node_up_vg_fence_init:91] PS4_LOOP=datavg :node_up:datavg[node_up_vg_fence_init:92] clodmget -q $'name like \'*VOLUME_GROUP\' and value = datavg' -f value -n HACMPresource :node_up:datavg[node_up_vg_fence_init:92] [[ -z datavg ]] :node_up:datavg[node_up_vg_fence_init:109] : Volume group datavg is an HACMP resource :node_up:datavg[node_up_vg_fence_init:111] [[ 'caavg_private rootvg' == ?(*\ )datavg?(\ *) ]] :node_up:datavg[node_up_vg_fence_init:115] fence_height=ro :node_up:datavg[node_up_vg_fence_init:119] : Recreate the fence group to match current volume group membership :node_up:datavg[node_up_vg_fence_init:121] cl_vg_fence_redo -c datavg ro :cl_vg_fence_redo[52] version=1.3 :cl_vg_fence_redo[55] RC=0 :cl_vg_fence_redo[55] typeset -li RC :cl_vg_fence_redo[58] : Check for optional -c parameter :cl_vg_fence_redo[60] [[ -c == -c ]] :cl_vg_fence_redo[62] c_flag=-c :cl_vg_fence_redo[63] shift :cl_vg_fence_redo[66] VG=datavg :cl_vg_fence_redo[67] UUID_file=/usr/es/sbin/cluster/etc/vg/datavg.uuid :cl_vg_fence_redo[68] fence_height=ro :cl_vg_fence_redo[70] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.uuid ]] :cl_vg_fence_redo[83] [[ -z ro ]] :cl_vg_fence_redo[98] : Rebuild the fence group for datavg :cl_vg_fence_redo[99] : First, find the disks in the volume group :cl_vg_fence_redo[101] /usr/sbin/getlvodm -v datavg :cl_vg_fence_redo[101] VGID=00c44af100004b00000001851e9dc053 :cl_vg_fence_redo[103] [[ -n 00c44af100004b00000001851e9dc053 ]] :cl_vg_fence_redo[106] : Create a fence group for datavg :cl_vg_fence_redo[108] /usr/sbin/getlvodm -w 00c44af100004b00000001851e9dc053 :cl_vg_fence_redo[108] cut -f2 '-d ' :cl_vg_fence_redo[108] PV_disk_list=$'hdisk2\nhdisk3\nhdisk4\nhdisk5\nhdisk6\nhdisk7\nhdisk8' :cl_vg_fence_redo[109] cl_vg_fence_init -c datavg ro hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8 cl_vg_fence_init[145]: version @(#) 7d4c34b 43haes/usr/sbin/cluster/events/utils/cl_vg_fence_init.c, 726, 2147A_aha726, Feb 05 2021 09:50 PM cl_vg_fence_init[204]: odm_initialize() cl_vg_fence_init[231]: calloc(7, 64) cl_vg_fence_init[259]: getattr(hdisk2, PCM) = PCM/friend/fcpother cl_vg_fence_init[259]: getattr(hdisk3, PCM) = PCM/friend/fcpother cl_vg_fence_init[259]: getattr(hdisk4, PCM) = PCM/friend/fcpother cl_vg_fence_init[259]: getattr(hdisk5, PCM) = PCM/friend/fcpother cl_vg_fence_init[259]: getattr(hdisk6, PCM) = PCM/friend/fcpother cl_vg_fence_init[259]: getattr(hdisk7, PCM) = PCM/friend/fcpother cl_vg_fence_init[259]: getattr(hdisk8, PCM) = PCM/friend/fcpother cl_vg_fence_init[294]: sfwAddFenceGroup(datavg, 7, hdisk2, hdisk3, hdisk4, hdisk5, hdisk6, hdisk7, hdisk8) cl_vg_fence_init[374]: free(200101b8) cl_vg_fence_init[400]: creat(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_vg_fence_init[408]: write(/usr/es/sbin/cluster/etc/vg/datavg.uuid, 16) cl_vg_fence_init[442]: sfwSetFenceGroup(vg=datavg, height=ro(2) uuid=ec2db4422261eae02091227fb9e53c88) :cl_vg_fence_redo[110] RC=0 :cl_vg_fence_redo[111] : Exit status is 0 from cl_vg_fence_init datavg ro hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8 :cl_vg_fence_redo[113] (( 0 != 0 )) :cl_vg_fence_redo[123] return 0 :node_up:datavg[node_up_vg_fence_init:122] valid_vg_lst=' datavg'
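Everything cl_vg_fence_redo needs comes out of the ODM: getlvodm -v maps the VG name to its VGID, getlvodm -w maps the VGID to its member PVs, and cl_vg_fence_init rebuilds the fence group from that disk list. The same steps in isolation, using the names and fence height from this trace:

    # Rebuild one VG fence group, per cl_vg_fence_redo[98]-[109] above.
    VG=datavg
    fence_height=ro
    VGID=$(/usr/sbin/getlvodm -v "$VG")                # VG name -> VGID
    PV_disk_list=$(/usr/sbin/getlvodm -w "$VGID" |     # VGID -> 'pvid hdisk' pairs
        cut -f2 '-d ')                                 # keep only the hdisk names
    # -c recreates the UUID file under /usr/es/sbin/cluster/etc/vg/
    cl_vg_fence_init -c "$VG" "$fence_height" $PV_disk_list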
:node_up:datavg[node_up_vg_fence_init:125] [[ -e /var/hacmp/log/node_up.lsvg.err ]] :node_up:datavg[node_up_vg_fence_init:125] [[ ! -s /var/hacmp/log/node_up.lsvg.err ]] :node_up:datavg[node_up_vg_fence_init:125] rm /var/hacmp/log/node_up.lsvg.err :node_up:datavg[node_up_vg_fence_init:128] : Any remaining old fence group files are from stale fence groups, :node_up:datavg[node_up_vg_fence_init:129] : so remove them :node_up:datavg[node_up_vg_fence_init:131] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.uuid ]] :node_up:datavg[node_up_vg_fence_init:133] ls /usr/es/sbin/cluster/etc/vg/datavg.uuid :node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:135] PS4_LOOP=/usr/es/sbin/cluster/etc/vg/datavg.uuid :node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:136] VG_name=datavg.uuid :node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:137] VG_name=datavg :node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:138] [[ ' datavg' == ?(*\ )datavg?(\ *) ]] :node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:141] : Just redid the fence group for datavg :node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:143] continue :node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:158] unset PS4_LOOP :node_up[node_up_vg_fence_init:160] return 0 :node_up[344] : If WLM manager classes have been configured for an application server, process them now :node_up[346] clodmget -q $'name like \'WLM_*\'' -f id HACMPresource :node_up[346] [[ -n '' ]] :node_up[371] : Call ss-load replicated resource methods if they are defined :node_up[373] cl_rrmethods2call ss_load :cl_rrmethods2call[56] version=%I% :cl_rrmethods2call[84] RRMETHODS='' :cl_rrmethods2call[85] NEED_RR_ENV_VARS=no :cl_rrmethods2call[104] : The load and unload methods if defined are returned on the :cl_rrmethods2call[105] : local node :cl_rrmethods2call[107] [[ epprda == epprda ]] :cl_rrmethods2call[109] NEED_RR_ENV_VARS=yes :cl_rrmethods2call[129] : Set the '*_REP_RESOURCE' variables if needed.
:cl_rrmethods2call[131] [[ yes == yes ]] :cl_rrmethods2call[133] cllsres :cl_rrmethods2call[133] 2> /dev/null :cl_rrmethods2call[133] eval APPLICATIONS='"epprd_app"' EXPORT_FILESYSTEM='"/board_org' '/sapmnt/EPP"' FILESYSTEM='""' FORCED_VARYON='"false"' FSCHECK_TOOL='"fsck"' FS_BEFORE_IPADDR='"false"' MOUNT_FILESYSTEM='"/board;/board_org"' RECOVERY_METHOD='"sequential"' SERVICE_LABEL='"epprd"' SSA_DISK_FENCING='"false"' VG_AUTO_IMPORT='"false"' VOLUME_GROUP='"datavg"' USERDEFINED_RESOURCES='""' :cl_rrmethods2call[1] APPLICATIONS=epprd_app :cl_rrmethods2call[1] EXPORT_FILESYSTEM='/board_org /sapmnt/EPP' :cl_rrmethods2call[1] FILESYSTEM='' :cl_rrmethods2call[1] FORCED_VARYON=false :cl_rrmethods2call[1] FSCHECK_TOOL=fsck :cl_rrmethods2call[1] FS_BEFORE_IPADDR=false :cl_rrmethods2call[1] MOUNT_FILESYSTEM='/board;/board_org' :cl_rrmethods2call[1] RECOVERY_METHOD=sequential :cl_rrmethods2call[1] SERVICE_LABEL=epprd :cl_rrmethods2call[1] SSA_DISK_FENCING=false :cl_rrmethods2call[1] VG_AUTO_IMPORT=false :cl_rrmethods2call[1] VOLUME_GROUP=datavg :cl_rrmethods2call[1] USERDEFINED_RESOURCES='' :cl_rrmethods2call[137] [[ -n '' ]] :cl_rrmethods2call[142] [[ -n '' ]] :cl_rrmethods2call[147] [[ -n '' ]] :cl_rrmethods2call[152] [[ -n '' ]] :cl_rrmethods2call[157] [[ -n '' ]] :cl_rrmethods2call[162] [[ -n '' ]] :cl_rrmethods2call[167] [[ -n '' ]] :cl_rrmethods2call[172] [[ -n '' ]] :cl_rrmethods2call[182] [[ -z '' ]] :cl_rrmethods2call[184] typeset sysmgdata :cl_rrmethods2call[185] typeset reposmgdata :cl_rrmethods2call[186] [[ -x /usr/es/sbin/cluster/xd_generic/xd_cli/clxd_list_mg_smit ]] :cl_rrmethods2call[191] [[ -n '' ]] :cl_rrmethods2call[191] [[ -n '' ]] :cl_rrmethods2call[197] echo '' :cl_rrmethods2call[199] return 0 :node_up[373] METHODS='' :node_up[387] : When the local node is brought up, reset the resource locator info. :node_up[390] clchdaemons -r -d clstrmgr_scripts -t resource_locator :node_up[397] [[ '' != manual ]] :node_up[400] : attempt passive varyon for any ECM VGs in serial RGs :node_up[405] cl_pvo :cl_pvo[590] version=1.34.2.12 :cl_pvo(0.007)[592] PS4_TIMER=true :cl_pvo(0.007)[594] rc=0 :cl_pvo(0.007)[594] typeset -li rc :cl_pvo(0.007)[595] mode=0 :cl_pvo(0.007)[595] typeset -li mode :cl_pvo(0.007)[600] ENODEV=19 :cl_pvo(0.008)[600] typeset -li ENODEV :cl_pvo(0.008)[601] vg_force_on_flag='' :cl_pvo(0.008)[605] : Pick up any passed options :cl_pvo(0.008)[607] rg_list='' :cl_pvo(0.008)[607] export rg_list :cl_pvo(0.008)[608] vg_list='' :cl_pvo(0.008)[609] fs_list='' :cl_pvo(0.008)[610] all_vgs_flag='' :cl_pvo(0.008)[611] [[ -z '' ]] :cl_pvo(0.008)[613] all_vgs_flag=true :cl_pvo(0.008)[615] getopts :g:v:f: option :cl_pvo(0.008)[629] shift 0 :cl_pvo(0.008)[630] [[ -n '' ]] :cl_pvo(0.008)[645] O_flag='' :cl_pvo(0.008)[646] odmget -q 'attribute = varyon_state' PdAt :cl_pvo(0.010)[646] [[ -n $'\nPdAt:\n\tuniquetype = "logical_volume/vgsubclass/vgtype"\n\tattribute = "varyon_state"\n\tdeflt = "0"\n\tvalues = "0,1,2,3"\n\twidth = ""\n\ttype = "R"\n\tgeneric = ""\n\trep = "l"\n\tnls_index = 0' ]] :cl_pvo(0.010)[649] : LVM may record that a volume group was varied on from an earlier :cl_pvo(0.010)[650] : IPL. Rely on HA state tracking, and override the LVM check :cl_pvo(0.010)[652] O_flag=-O :cl_pvo(0.010)[655] [[ -n true ]] :cl_pvo(0.010)[657] [[ -z epprda ]] :cl_pvo(0.010)[661] [[ -z epprda ]] :cl_pvo(0.010)[672] : Since no resource names of any type were explicitly passed, go :cl_pvo(0.010)[673] : find all the resource groups this node is a member of. 
:cl_pvo(0.012)[675] clodmget -f group,nodes HACMPgroup :cl_pvo(0.015)[675] egrep '[: ]epprda( |$)' :cl_pvo(0.016)[675] cut -f1 -d: :cl_pvo(0.019)[675] rg_list=epprd_rg :cl_pvo(0.019)[676] [[ -z epprd_rg ]] :cl_pvo(0.019)[686] [[ -z '' ]] :cl_pvo(0.019)[686] [[ -n epprd_rg ]] :cl_pvo(0.019)[689] : Since no volume groups were passed, go find all the volume groups :cl_pvo(0.019)[690] : in the given/extracted list of resource groups. :cl_pvo(0.019)[695] : For each resource group that this node participates in, get the :cl_pvo(0.019)[696] : list of serial access volume groups in that resource group. :cl_pvo(0.019)[698] clodmget -q 'group = epprd_rg and name = VOLUME_GROUP' -f value -n HACMPresource :cl_pvo(0.022)[698] rg_vg_list=datavg :cl_pvo(0.022)[700] [[ -n datavg ]] :cl_pvo(0.022)[702] [[ -n true ]] :cl_pvo(0.022)[703] odmget -q $'group = epprd_rg and name like \'*REP_RESOURCE\'' HACMPresource :cl_pvo(0.024)[703] [[ -n '' ]] :cl_pvo(0.024)[739] : If there were any serial access volume groups for this node and :cl_pvo(0.024)[740] : that resource group, add them to the list. :cl_pvo(0.024)[742] vg_list=datavg :cl_pvo(0.024)[747] [[ -z '' ]] :cl_pvo(0.024)[747] [[ -n epprd_rg ]] :cl_pvo(0.024)[750] : Since no file systems were passed, go find all the file systems in :cl_pvo(0.024)[751] : the given/extracted list of resource groups. :cl_pvo(0.024)[755] : For each resource group that this node participates in, get the :cl_pvo(0.024)[756] : list of file systems in that resource group. :cl_pvo(0.024)[761] clodmget -q 'group = epprd_rg and name = FILESYSTEM' -f value -n HACMPresource :cl_pvo(0.027)[761] rg_fs_list=ALL :cl_pvo(0.027)[763] [[ -n ALL ]] :cl_pvo(0.027)[765] [[ -n true ]] :cl_pvo(0.027)[766] odmget -q $'group = epprd_rg and name like \'*REP_RESOURCE\'' HACMPresource :cl_pvo(0.029)[766] [[ -n '' ]] :cl_pvo(0.029)[780] : If there were any file systems for this node and that resource :cl_pvo(0.029)[781] : group, add them to the list :cl_pvo(0.029)[783] fs_list=ALL :cl_pvo(0.029)[790] [[ ALL == ALL ]] :cl_pvo(0.029)[792] continue :cl_pvo(0.029)[801] : Remove any duplicates from the volume group list :cl_pvo(0.031)[803] echo datavg :cl_pvo(0.033)[803] tr ' ' '\n' :cl_pvo(0.034)[803] sort -u :cl_pvo(0.038)[803] vg_list=datavg :cl_pvo(0.038)[805] [[ -z datavg ]] :cl_pvo(0.038)[814] : Find out what volume groups are currently on-line :cl_pvo(0.039)[816] lsvg -L -o :cl_pvo(0.039)[816] 2> /tmp/lsvg.err :cl_pvo(0.042)[816] print caavg_private rootvg :cl_pvo(0.042)[816] ON_LIST='caavg_private rootvg' :cl_pvo(0.042)[819] : If this node is the first node up in the cluster, :cl_pvo(0.042)[820] : we want to do a sync for each of the volume groups :cl_pvo(0.042)[821] : we bring on-line. If multiple cluster nodes are already active, the :cl_pvo(0.042)[822] : sync is unnecessary, having been done once, and possibly disruptive. :cl_pvo(0.042)[824] [[ -n '' ]] :cl_pvo(0.042)[833] : No other cluster nodes are present, default to sync just to be sure :cl_pvo(0.042)[834] : the volume group is in a good state :cl_pvo(0.042)[836] syncflag='' :cl_pvo(0.042)[840] : Now, process each volume group in the list of those this node accesses. :cl_pvo(0.042):datavg[844] PS4_LOOP=datavg :cl_pvo(0.042):datavg[844] typeset PS4_LOOP :cl_pvo(0.042):datavg[846] : Skip any concurrent GMVGs, they should never be pvo.
:cl_pvo(0.042):datavg[848] odmget -q name='GMVG_REP_RESOURCE AND value=datavg' HACMPresource :cl_pvo(0.045):datavg[848] [[ -n '' ]] :cl_pvo(0.045):datavg[853] : The VGID is what the LVM low level commands used below use to :cl_pvo(0.045):datavg[854] : identify the volume group. :cl_pvo(0.045):datavg[856] /usr/sbin/getlvodm -v datavg :cl_pvo(0.047):datavg[856] vgid=00c44af100004b00000001851e9dc053 :cl_pvo(0.047):datavg[860] mode=99 :cl_pvo(0.047):datavg[863] : Attempt to determine the mode of the volume group - is it an :cl_pvo(0.047):datavg[864] : enhanced concurrent mode volume group or not. :cl_pvo(0.047):datavg[868] export mode :cl_pvo(0.047):datavg[869] hdisklist='' :cl_pvo(0.048):datavg[870] /usr/sbin/getlvodm -w 00c44af100004b00000001851e9dc053 :cl_pvo(0.050):datavg[870] read pvid hdisk :cl_pvo(0.050):datavg[871] hdisklist=hdisk2 :cl_pvo(0.050):datavg[870] read pvid hdisk :cl_pvo(0.051):datavg[871] hdisklist='hdisk2 hdisk3' :cl_pvo(0.051):datavg[870] read pvid hdisk :cl_pvo(0.051):datavg[871] hdisklist='hdisk2 hdisk3 hdisk4' :cl_pvo(0.051):datavg[870] read pvid hdisk :cl_pvo(0.051):datavg[871] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5' :cl_pvo(0.051):datavg[870] read pvid hdisk :cl_pvo(0.051):datavg[871] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6' :cl_pvo(0.051):datavg[870] read pvid hdisk :cl_pvo(0.051):datavg[871] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7' :cl_pvo(0.051):datavg[870] read pvid hdisk :cl_pvo(0.051):datavg[871] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8' :cl_pvo(0.051):datavg[870] read pvid hdisk :cl_pvo(0.051):datavg[873] get_vg_mode 'hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8' 00c44af100004b00000001851e9dc053 datavg :cl_pvo(0.051):datavg[get_vg_mode:289] typeset vgid vg_name syncflag hdisklist :cl_pvo(0.051):datavg[get_vg_mode:290] typeset GROUP_NAME FORCED_VARYON :cl_pvo(0.051):datavg[get_vg_mode:291] TUR_RC=0 :cl_pvo(0.051):datavg[get_vg_mode:291] typeset -li TUR_RC :cl_pvo(0.051):datavg[get_vg_mode:292] vg_disks=0 :cl_pvo(0.051):datavg[get_vg_mode:292] typeset -li vg_disks :cl_pvo(0.051):datavg[get_vg_mode:293] max_disk_test=0 :cl_pvo(0.051):datavg[get_vg_mode:293] typeset -li max_disk_test :cl_pvo(0.051):datavg[get_vg_mode:294] disk_tested=0 :cl_pvo(0.051):datavg[get_vg_mode:294] typeset -li disk_tested :cl_pvo(0.051):datavg[get_vg_mode:296] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8' :cl_pvo(0.051):datavg[get_vg_mode:297] vgid=00c44af100004b00000001851e9dc053 :cl_pvo(0.051):datavg[get_vg_mode:298] vg_name=datavg :cl_pvo(0.051):datavg[get_vg_mode:299] syncflag='' :cl_pvo(0.051):datavg[get_vg_mode:301] odmget -q name='datavg and attribute=conc_capable and value=y' CuAt :cl_pvo(0.052):datavg[get_vg_mode:301] ODMDIR=/etc/objrepos :cl_pvo(0.054):datavg[get_vg_mode:301] [[ -n $'\nCuAt:\n\tname = "datavg"\n\tattribute = "conc_capable"\n\tvalue = "y"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "l"\n\tnls_index = 0' ]] :cl_pvo(0.054):datavg[get_vg_mode:304] : If LVM thinks that this volume group is concurrent capable, that :cl_pvo(0.054):datavg[get_vg_mode:305] : is good enough :cl_pvo(0.054):datavg[get_vg_mode:307] mode=32 :cl_pvo(0.054):datavg[get_vg_mode:308] return :cl_pvo(0.054):datavg[876] : See if the volume group is already on line. This should :cl_pvo(0.054):datavg[877] : only happen if it were manually brought on line outside of HACMP :cl_pvo(0.054):datavg[878] : control, or left on-line after a forced down. 
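Note how get_vg_mode settles the enhanced-concurrent question with a single ODM lookup against the system object repository (ODMDIR is pointed at /etc/objrepos, not the HACMP copy): if LVM has conc_capable=y recorded for the group, that is taken as sufficient. Reduced to a sketch with the names from this trace:

    # ECM test from get_vg_mode[301]-[307]: trust LVM's conc_capable attribute.
    vg_name=datavg
    out=$(ODMDIR=/etc/objrepos odmget \
        -q "name = $vg_name and attribute = conc_capable and value = y" CuAt)
    if [[ -n $out ]]; then
        mode=32        # enhanced concurrent mode; passive varyon is possible
    else
        mode=99        # not provably ECM; cl_pvo probes further
    fi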
:cl_pvo(0.054):datavg[880] vg_on_mode='' :cl_pvo(0.054):datavg[880] typeset vg_on_mode :cl_pvo(0.054):datavg[881] [[ 'caavg_private rootvg' == ?(*\ )datavg?(\ *) ]] :cl_pvo(0.055):datavg[891] lsvg -L datavg :cl_pvo(0.055):datavg[891] 2> /dev/null :cl_pvo(0.057):datavg[891] grep -q -i -w passive-only :cl_pvo(0.059):datavg[896] [[ -n '' ]] :cl_pvo(0.059):datavg[976] : Volume group is currently not on line in any mode :cl_pvo(0.059):datavg[978] (( 99 == 32 )) :cl_pvo(0.060):datavg[1041] (( 32 != 32 && 99 != 32 )) :cl_pvo(0.060):datavg[1060] (( 32 == 32 )) :cl_pvo(0.060):datavg[1063] : If this is actually an enhanced concurrent mode volume group, :cl_pvo(0.060):datavg[1064] : bring it on line in passive mode. Other kinds are just skipped. :cl_pvo(0.060):datavg[1066] varyonp datavg 'hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8' :cl_pvo(0.060):datavg[varyonp:417] NOQUORUM=20 :cl_pvo(0.060):datavg[varyonp:417] typeset -li NOQUORUM :cl_pvo(0.060):datavg[varyonp:418] rc=0 :cl_pvo(0.060):datavg[varyonp:418] typeset -li rc :cl_pvo(0.060):datavg[varyonp:421] : Pick up passed parameters: volume group and sync flag :cl_pvo(0.060):datavg[varyonp:423] typeset syncflag hdisklist vg :cl_pvo(0.060):datavg[varyonp:424] vg=datavg :cl_pvo(0.060):datavg[varyonp:425] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8' :cl_pvo(0.060):datavg[varyonp:426] syncflag='' :cl_pvo(0.060):datavg[varyonp:429] : Make sure the volume group is not fenced. Varyon requires read write :cl_pvo(0.060):datavg[varyonp:430] : access. :cl_pvo(0.060):datavg[varyonp:432] cl_set_vg_fence_height -c datavg rw cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0)) :cl_pvo(0.063):datavg[varyonp:433] RC=0 :cl_pvo(0.063):datavg[varyonp:434] (( 19 == 0 )) :cl_pvo(0.063):datavg[varyonp:442] : Return code from volume group fencing for datavg is 0 :cl_pvo(0.063):datavg[varyonp:443] (( 0 != 0 )) :cl_pvo(0.063):datavg[varyonp:455] : Try to vary on the volume group in passive concurrent mode :cl_pvo(0.063):datavg[varyonp:457] varyonvg -c -P -O datavg :cl_pvo(0.528):datavg[varyonp:458] rc=0 :cl_pvo(0.528):datavg[varyonp:460] (( 0 != 0 )) :cl_pvo(0.528):datavg[varyonp:483] : exit status of varyonvg -c -P -O datavg is: 0 :cl_pvo(0.528):datavg[varyonp:485] (( 0 == 20 )) :cl_pvo(0.528):datavg[varyonp:505] : If varyon was ultimately unsuccessful, note the error :cl_pvo(0.528):datavg[varyonp:507] (( 0 != 0 )) :cl_pvo(0.528):datavg[varyonp:511] : If varyonvg was successful, try to recover :cl_pvo(0.528):datavg[varyonp:512] : any missing or removed disks :cl_pvo(0.528):datavg[varyonp:514] mr_recovery datavg :cl_pvo(0.528):datavg[mr_recovery:59] vg=datavg :cl_pvo(0.528):datavg[mr_recovery:59] typeset vg :cl_pvo(0.528):datavg[mr_recovery:60] typeset mr_disks :cl_pvo(0.528):datavg[mr_recovery:61] typeset disk_list :cl_pvo(0.528):datavg[mr_recovery:62] typeset hdisk :cl_pvo(0.530):datavg[mr_recovery:64] lsvg -p datavg :cl_pvo(0.530):datavg[mr_recovery:64] 2> /dev/null :cl_pvo(0.531):datavg[mr_recovery:64] grep -iw missing :cl_pvo(0.551):datavg[mr_recovery:64] missing_disks='' :cl_pvo(0.551):datavg[mr_recovery:66] [[ -n '' ]] 
:cl_pvo(0.553):datavg[mr_recovery:89] lsvg -p datavg :cl_pvo(0.553):datavg[mr_recovery:89] 2> /dev/null :cl_pvo(0.555):datavg[mr_recovery:89] grep -iw removed :cl_pvo(0.574):datavg[mr_recovery:89] removed_disks='' :cl_pvo(0.574):datavg[mr_recovery:91] [[ -n '' ]] :cl_pvo(0.574):datavg[varyonp:518] : Restore the fence height to read only, for passive varyon :cl_pvo(0.574):datavg[varyonp:520] cl_set_vg_fence_height -c datavg ro cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=ro(2)) :cl_pvo(0.577):datavg[varyonp:521] RC=0 :cl_pvo(0.577):datavg[varyonp:522] : Return code from volume group fencing for datavg is 0 :cl_pvo(0.577):datavg[varyonp:523] (( 0 != 0 )) :cl_pvo(0.577):datavg[varyonp:533] return 0 :cl_pvo(0.577):datavg[1073] return 0 :node_up[406] : exit status of cl_pvo is: 0 :node_up[422] ls '/dev/vpath*' :node_up[422] 1> /dev/null 2>& 1 :node_up[432] : Configure any split and merge policies. :node_up[434] rm -f /usr/es/sbin/cluster/etc/smm_oflag :node_up[435] [[ -z '' ]] :node_up[438] : If this is the first node up, configure split merge handling. :node_up[440] cl_cfg_sm_rt :cl_cfg_sm_rt[738] version=1.34 :cl_cfg_sm_rt[741] clctrl_rc=0 :cl_cfg_sm_rt[741] typeset -li clctrl_rc :cl_cfg_sm_rt[742] src_rc=0 :cl_cfg_sm_rt[742] typeset -li src_rc :cl_cfg_sm_rt[743] cl_migcheck_rc=0 :cl_cfg_sm_rt[743] typeset -li cl_migcheck_rc :cl_cfg_sm_rt[744] bad_policy='' :cl_cfg_sm_rt[745] SMP='' :cl_cfg_sm_rt[748] : If we are in migration - if all nodes are not up to this level - do not :cl_cfg_sm_rt[749] : attempt any configuration. 
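The shape of varyonp is worth noting: the fence must be dropped to read/write before varyonvg can succeed, and is raised back to read/only immediately after the passive varyon, so the disks are never writable longer than necessary. Its skeleton, with the flags and names from this trace (error paths and the mr_recovery missing/removed-disk scan omitted):

    # Passive varyon of an ECM VG, per varyonp[429]-[533] above.
    vg=datavg
    cl_set_vg_fence_height -c "$vg" rw   # varyon needs read/write access
    varyonvg -c -P -O "$vg"              # concurrent, Passive, Override LVM's
    rc=$?                                #   recorded varyon state from last IPL
    (( rc != 0 )) && print -u2 "varyonvg failed for $vg: rc=$rc"
    cl_set_vg_fence_height -c "$vg" ro   # back to read/only once passive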
:cl_cfg_sm_rt[751] clmixver :cl_cfg_sm_rt[751] version=22 :cl_cfg_sm_rt[752] (( 22 < 14 )) :cl_cfg_sm_rt[761] : Retrieve configured policies :cl_cfg_sm_rt[763] clodmget -q 'policy = action' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[763] Action=Reboot :cl_cfg_sm_rt[764] clodmget -q 'policy = split' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[764] Split=None :cl_cfg_sm_rt[765] clodmget -q 'policy = merge' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[765] Merge=Majority :cl_cfg_sm_rt[766] clodmget -q 'policy = tiebreaker' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[766] TieBreaker='' :cl_cfg_sm_rt[767] clodmget -q 'policy = nfs_quorumserver' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[767] nfs_quorumserver='' :cl_cfg_sm_rt[768] clodmget -q 'policy = local_quorumdirectory' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[768] local_quorumdirectory='' :cl_cfg_sm_rt[769] clodmget -q 'policy = remote_quorumdirectory' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[769] remote_quorumdirectory='' :cl_cfg_sm_rt[770] clodmget -q 'policy = anhp' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[770] is_anhp='' :cl_cfg_sm_rt[771] clodmget -q 'policy = scsi' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[771] is_scsi='' :cl_cfg_sm_rt[772] clodmget -q name=clutils.log -f value -n HACMPlogs :cl_cfg_sm_rt[772] CLUTILS_LOG=/var/hacmp/log/clutils.log :cl_cfg_sm_rt[775] : If policies are unset, apply the default policies :cl_cfg_sm_rt[777] Split=None :cl_cfg_sm_rt[778] Merge=Majority :cl_cfg_sm_rt[779] Action=Reboot :cl_cfg_sm_rt[782] : If tiebreaker was a configured policy, be sure that one was defined :cl_cfg_sm_rt[784] [[ -z '' ]] :cl_cfg_sm_rt[786] [[ None == TieBreaker ]] :cl_cfg_sm_rt[790] [[ Majority == TieBreaker ]] :cl_cfg_sm_rt[795] [[ -n '' ]] :cl_cfg_sm_rt[807] : Set up the interlock file for use by smcaactrl. This tells :cl_cfg_sm_rt[808] : smcaactrl to allow the following CAA operations. :cl_cfg_sm_rt[810] date :cl_cfg_sm_rt[810] 1> /usr/es/sbin/cluster/etc/cl_cfg_sm_rt.28049740 :cl_cfg_sm_rt[811] trap 'on_exit $?' EXIT :cl_cfg_sm_rt[814] : Setting up CAA tunable local_merge_policy :cl_cfg_sm_rt[816] typeset -i caa_level :cl_cfg_sm_rt[817] lslpp -l bos.cluster.rte :cl_cfg_sm_rt[817] grep bos.cluster.rte :cl_cfg_sm_rt[817] uniq :cl_cfg_sm_rt[817] awk -F ' ' '{print $2}' :cl_cfg_sm_rt[817] tr -d . 
:cl_cfg_sm_rt[817] caa_level=725102 :cl_cfg_sm_rt[818] (( 725102 >=7140 )) :cl_cfg_sm_rt[819] configure_local_merge_policy :cl_cfg_sm_rt[configure_local_merge_policy:665] typeset -i clctrl_rc :cl_cfg_sm_rt[configure_local_merge_policy:666] [[ -z '' ]] :cl_cfg_sm_rt[configure_local_merge_policy:666] [[ -z '' ]] :cl_cfg_sm_rt[configure_local_merge_policy:667] capability=0 :cl_cfg_sm_rt[configure_local_merge_policy:667] typeset -i capability :cl_cfg_sm_rt[configure_local_merge_policy:669] cl_get_capabilities -i 6 :cl_cfg_sm_rt[configure_local_merge_policy:669] 2>& 1 :cl_cfg_sm_rt[configure_local_merge_policy:669] caa_sm_capability=$':cl_cfg_sm_rt[configure_local_merge_policy:669] LC_ALL=C\ncl_get_capabilities[178]: version 1.9\ncapability is 6\n\tid: 6 version: 1 flag: 1 ' :cl_cfg_sm_rt[configure_local_merge_policy:670] [[ -n $':cl_cfg_sm_rt[configure_local_merge_policy:669] LC_ALL=C\ncl_get_capabilities[178]: version 1.9\ncapability is 6\n\tid: 6 version: 1 flag: 1 ' ]] :cl_cfg_sm_rt[configure_local_merge_policy:674] : If Sub Cluster Split Merge capability is defined :cl_cfg_sm_rt[configure_local_merge_policy:675] : and globally available, then capability is set to 1 :cl_cfg_sm_rt[configure_local_merge_policy:677] capability='1 ' :cl_cfg_sm_rt[configure_local_merge_policy:680] (( 1 == 1 )) :cl_cfg_sm_rt[configure_local_merge_policy:682] : Sub Cluster Split-Merge capability is available cluster wide :cl_cfg_sm_rt[configure_local_merge_policy:684] [[ Majority != None ]] :cl_cfg_sm_rt[configure_local_merge_policy:686] clctrl -tune -o local_merge_policy=h 1 tunable updated on cluster epprda_cluster. :cl_cfg_sm_rt[configure_local_merge_policy:687] clctrl_rc=0 :cl_cfg_sm_rt[configure_local_merge_policy:688] (( 0 != 0 )) :cl_cfg_sm_rt[configure_local_merge_policy:725] return 0 :cl_cfg_sm_rt[820] rc=0 :cl_cfg_sm_rt[820] typeset -i rc :cl_cfg_sm_rt[821] (( 0 < 0 )) :cl_cfg_sm_rt[827] : Configure CAA in accordance with the specified or defaulted policies :cl_cfg_sm_rt[828] : for Merge :cl_cfg_sm_rt[830] clctrl -tune -a epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).communication_mode = u epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).config_timeout = 240 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).deadman_mode = a epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).dr_enabled = 1 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).link_timeout = 30000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).local_merge_policy = h epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).network_fdt = 20000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).no_if_traffic_monitor = 0 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).node_down_delay = 10000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).node_timeout = 30000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).packet_ttl = 32 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).remote_hb_factor = 1 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).repos_mode = e epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).site_merge_policy = h :cl_cfg_sm_rt[831] clctrl_rc=0 :cl_cfg_sm_rt[832] : Return code from 'clctrl -tune -a' is 0 :cl_cfg_sm_rt[835] : If the current deadman mode is not set to ASSERT, :cl_cfg_sm_rt[836] : change it to that :cl_cfg_sm_rt[842] clctrl -tune -x deadman_mode :cl_cfg_sm_rt[842] cut -f2 -d: :cl_cfg_sm_rt[842] current_deadman_mode=a :cl_cfg_sm_rt[843] [[ a != a ]] :cl_cfg_sm_rt[849] : Determine the current site merge policy, to see if it needs :cl_cfg_sm_rt[850] : to be changed 
:cl_cfg_sm_rt[852] clctrl -tune -x site_merge_policy :cl_cfg_sm_rt[852] cut -f2 -d: :cl_cfg_sm_rt[852] current_merge_policy=h :cl_cfg_sm_rt[854] [[ Majority == Manual ]] :cl_cfg_sm_rt[865] [[ Majority == None ]] :cl_cfg_sm_rt[878] : Everything else - tie breaker, majority, nfs - is heuristic merge policy :cl_cfg_sm_rt[880] [[ h != h ]] :cl_cfg_sm_rt[886] clctrl_rc=0 :cl_cfg_sm_rt[887] (( 0 != 0 )) :cl_cfg_sm_rt[901] [[ -n '' ]] :cl_cfg_sm_rt[919] RSCT_START_RETRIES=0 :cl_cfg_sm_rt[919] typeset -li RSCT_START_RETRIES :cl_cfg_sm_rt[920] MIN_RSCT_RETRIES=1 :cl_cfg_sm_rt[920] typeset -li MIN_RSCT_RETRIES :cl_cfg_sm_rt[921] MAX_RSCT_RETRIES=15 :cl_cfg_sm_rt[921] typeset -li MAX_RSCT_RETRIES :cl_cfg_sm_rt[922] grep ^RSCT_START_RETRIES /etc/environment :cl_cfg_sm_rt[922] eval :cl_cfg_sm_rt[923] (( 0 < 1 )) :cl_cfg_sm_rt[923] RSCT_START_RETRIES=1 :cl_cfg_sm_rt[924] (( 1 > 15 )) :cl_cfg_sm_rt[926] RSCT_TB_WAITTIME=0 :cl_cfg_sm_rt[926] typeset -li RSCT_TB_WAITTIME :cl_cfg_sm_rt[927] grep ^RSCT_TB_WAITTIME /etc/environment :cl_cfg_sm_rt[927] eval :cl_cfg_sm_rt[928] (( 0 <= 0 )) :cl_cfg_sm_rt[928] RSCT_TB_WAITTIME=30 :cl_cfg_sm_rt[930] RSCT_START_WAIT=0 :cl_cfg_sm_rt[930] typeset -li RSCT_START_WAIT :cl_cfg_sm_rt[931] MIN_RSCT_WAIT=10 :cl_cfg_sm_rt[931] typeset -li MIN_RSCT_WAIT :cl_cfg_sm_rt[932] MAX_RSCT_WAIT=60 :cl_cfg_sm_rt[932] typeset -li MAX_RSCT_WAIT :cl_cfg_sm_rt[933] grep ^RSCT_START_WAIT /etc/environment :cl_cfg_sm_rt[933] eval :cl_cfg_sm_rt[934] (( 0 < 10 )) :cl_cfg_sm_rt[934] RSCT_START_WAIT=10 :cl_cfg_sm_rt[935] (( 10 > 60 )) :cl_cfg_sm_rt[937] (( retries=0)) :cl_cfg_sm_rt[937] (( 0 < 1)) :cl_cfg_sm_rt[939] lsrsrc IBM.PeerNode :cl_cfg_sm_rt[939] 1>> /var/hacmp/log/clutils.log 2>& 1 :cl_cfg_sm_rt[941] break :cl_cfg_sm_rt[947] (( 0 >= 1 )) :cl_cfg_sm_rt[954] : Configure RSCT in accordance with the specified or defaulted policies :cl_cfg_sm_rt[955] : for Split :cl_cfg_sm_rt[965] CT_MANAGEMENT_SCOPE=2 :cl_cfg_sm_rt[965] export CT_MANAGEMENT_SCOPE :cl_cfg_sm_rt[966] lsrsrc -t -c -x IBM.PeerNode OpQuorumTieBreaker :cl_cfg_sm_rt[966] Current_TB='"Success" ' :cl_cfg_sm_rt[967] Current_TB='"Success' :cl_cfg_sm_rt[968] Current_TB=Success :cl_cfg_sm_rt[969] [[ None == None ]] :cl_cfg_sm_rt[971] [[ Success == Success ]] :cl_cfg_sm_rt[973] chrsrc -c IBM.PeerNode OpQuorumTieBreaker=Operator :cl_cfg_sm_rt[974] src_rc=0 :cl_cfg_sm_rt[975] (( 0 != 0 )) :cl_cfg_sm_rt[981] (( 0 == 0 )) :cl_cfg_sm_rt[983] chrsrc -s Name='="Success"' IBM.TieBreaker PostReserveWaitTime=30 :cl_cfg_sm_rt[984] src_rc=0 :cl_cfg_sm_rt[985] (( 0 != 0 )) :cl_cfg_sm_rt[990] chrsrc -c IBM.PeerNode OpQuorumTieBreaker=Success :cl_cfg_sm_rt[991] src_rc=0 :cl_cfg_sm_rt[992] (( 0 != 0 )) :cl_cfg_sm_rt[1044] src_rc=0 :cl_cfg_sm_rt[1045] (( 0 != 0 )) :cl_cfg_sm_rt[1053] : Configure RSCT Action :cl_cfg_sm_rt[1055] chrsrc -c IBM.PeerNode QuorumType=4 :cl_cfg_sm_rt[1056] src_rc=0 :cl_cfg_sm_rt[1057] (( 0 != 0 )) :cl_cfg_sm_rt[1064] chrsrc -c IBM.PeerNode CriticalMode=2 :cl_cfg_sm_rt[1065] src_rc=0 :cl_cfg_sm_rt[1066] (( 0 != 0 )) :cl_cfg_sm_rt[1073] [[ Reboot == Reboot ]] :cl_cfg_sm_rt[1075] chrsrc -c IBM.PeerNode CritRsrcProtMethod=1 :cl_cfg_sm_rt[1077] src_rc=0 :cl_cfg_sm_rt[1078] (( 0 != 0 )) :cl_cfg_sm_rt[1086] : Configure RSCT Critical Resource Daemon Grace Period for cluster level. 
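The tiebreaker handling at [966]-[990] above is a read/modify/restore cycle: the current OpQuorumTieBreaker is read and unquoted, switched to Operator so that the Success tiebreaker resource itself can be retuned, and then restored. Condensed below with the values observed here; the chrsrc selection string is a reconstruction, since the trace shows it mangled by quoting:

    # Split policy 'None': retune the Success tiebreaker, per cl_cfg_sm_rt.
    export CT_MANAGEMENT_SCOPE=2                        # peer domain scope
    Current_TB=$(lsrsrc -t -c -x IBM.PeerNode OpQuorumTieBreaker)
    Current_TB=${Current_TB//[\" ]/}                    # '"Success" ' -> Success
    chrsrc -c IBM.PeerNode OpQuorumTieBreaker=Operator  # release Success for edit
    chrsrc -s 'Name == "Success"' IBM.TieBreaker PostReserveWaitTime=30
    chrsrc -c IBM.PeerNode OpQuorumTieBreaker=Success   # restore the original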
:cl_cfg_sm_rt[1088] typeset grace_period :cl_cfg_sm_rt[1089] clodmget -f crit_daemon_restart_grace_period HACMPcluster :cl_cfg_sm_rt[1089] grace_period=60 :cl_cfg_sm_rt[1090] lsrsrc -c IBM.PeerNode :cl_cfg_sm_rt[1090] LC_ALL=C :cl_cfg_sm_rt[1090] grep CritDaemonRestartGracePeriod :cl_cfg_sm_rt[1090] awk -F= '{print $2}' :cl_cfg_sm_rt[1090] rsct_grace_period=' 60' :cl_cfg_sm_rt[1091] [[ -n ' 60' ]] :cl_cfg_sm_rt[1092] (( 60 != 60 )) :cl_cfg_sm_rt[1104] : Configure RSCT Critical Resource Daemon Grace Period for node level. :cl_cfg_sm_rt[1106] typeset node_grace_period :cl_cfg_sm_rt[1107] typeset node_list :cl_cfg_sm_rt[1108] typeset rsct_node_grace_period :cl_cfg_sm_rt[1110] : Get the CAA active nodes list :cl_cfg_sm_rt[1112] lscluster -m :cl_cfg_sm_rt[1112] grep -p 'State of node: UP' :cl_cfg_sm_rt[1112] grep -w 'Node name:' :cl_cfg_sm_rt[1112] cut -f2 -d: :cl_cfg_sm_rt[1112] node_list=$' epprda\n epprds' :cl_cfg_sm_rt[1115] clodmget -n -q object='COMMUNICATION_PATH and value=epprda' -f name HACMPnode :cl_cfg_sm_rt[1115] host_name=epprda :cl_cfg_sm_rt[1116] clodmget -n -q object='CRIT_DAEMON_RESTART_GRACE_PERIOD and name=epprda' -f value HACMPnode :cl_cfg_sm_rt[1116] node_grace_period='' :cl_cfg_sm_rt[1117] [[ -n '' ]] :cl_cfg_sm_rt[1115] clodmget -n -q object='COMMUNICATION_PATH and value=epprds' -f name HACMPnode :cl_cfg_sm_rt[1115] host_name=epprds :cl_cfg_sm_rt[1116] clodmget -n -q object='CRIT_DAEMON_RESTART_GRACE_PERIOD and name=epprds' -f value HACMPnode :cl_cfg_sm_rt[1116] node_grace_period='' :cl_cfg_sm_rt[1117] [[ -n '' ]] :cl_cfg_sm_rt[1134] : Success exit. Display the CAA and RSCT configuration :cl_cfg_sm_rt[1136] clctrl -tune -a epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).communication_mode = u epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).config_timeout = 240 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).deadman_mode = a epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).dr_enabled = 1 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).link_timeout = 30000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).local_merge_policy = h epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).network_fdt = 20000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).no_if_traffic_monitor = 0 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).node_down_delay = 10000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).node_timeout = 30000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).packet_ttl = 32 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).remote_hb_factor = 1 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).repos_mode = e epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).site_merge_policy = h :cl_cfg_sm_rt[1137] lscluster -m Calling node query for all nodes... 
Node query number of nodes examined: 2 Node name: epprda Cluster shorthand id for node: 1 UUID for node: f42873b8-9ee2-11ed-8018-fae6134ea920 State of node: UP NODE_LOCAL Reason: NONE Smoothed rtt to node: 0 Mean Deviation in network rtt to node: 0 Number of clusters node is a member in: 1 CLUSTER NAME SHID UUID epprda_cluster 0 f43c91c2-9ee2-11ed-8018-fae6134ea920 SITE NAME SHID UUID LOCAL 1 51735173-5173-5173-5173-517351735173 Points of contact for node: 0 ---------------------------------------------------------------------------- Node name: epprds Cluster shorthand id for node: 2 UUID for node: f42873fe-9ee2-11ed-8018-fae6134ea920 State of node: UP Reason: NONE Smoothed rtt to node: 8 Mean Deviation in network rtt to node: 3 Number of clusters node is a member in: 1 CLUSTER NAME SHID UUID epprda_cluster 0 f43c91c2-9ee2-11ed-8018-fae6134ea920 SITE NAME SHID UUID LOCAL 1 51735173-5173-5173-5173-517351735173 Points of contact for node: 1 ----------------------------------------------------------------------- Interface State Protocol Status SRC_IP->DST_IP ----------------------------------------------------------------------- tcpsock->02 UP IPv4 none 61.81.244.134->61.81.244.123 :cl_cfg_sm_rt[1138] lsrsrc -x -A b IBM.PeerNode resource 1: Name = "epprds" NodeList = {2} RSCTVersion = "3.2.6.4" ClassVersions = {} CritRsrcProtMethod = 0 IsQuorumNode = 1 IsPreferredGSGL = 1 NodeUUID = "f42873fe-9ee2-11ed-8018-fae6134ea920" HostName = "epprds" TBPriority = 0 CritDaemonRestartGracePeriod = -1 ActivePeerDomain = "epprda_cluster" NodeNameList = {"epprds"} OpState = 1 ConfigChanged = 1 CritRsrcActive = 0 OpUsabilityState = 1 MaintenanceState = 0 resource 2: Name = "epprda" NodeList = {1} RSCTVersion = "3.2.6.4" ClassVersions = {} CritRsrcProtMethod = 0 IsQuorumNode = 1 IsPreferredGSGL = 1 NodeUUID = "f42873b8-9ee2-11ed-8018-fae6134ea920" HostName = "epprda" TBPriority = 0 CritDaemonRestartGracePeriod = -1 ActivePeerDomain = "epprda_cluster" NodeNameList = {"epprda"} OpState = 1 ConfigChanged = 1 CritRsrcActive = 0 OpUsabilityState = 1 MaintenanceState = 0 :cl_cfg_sm_rt[1139] lsrsrc -x -c -A b IBM.PeerNode resource 1: CommittedRSCTVersion = "3.2.2.0" ActiveVersionChanging = 0 OpQuorumOverride = 0 CritRsrcProtMethod = 1 OpQuorumTieBreaker = "Success" QuorumType = 4 QuorumGroupName = "" Fanout = 32 OpFenceGroup = "" NodeCleanupCommand = "" NodeCleanupCriteria = "" QuorumLessStartupTimeout = 120 CriticalMode = 2 NotifyQuorumChangedCommand = "" NamePolicy = 1 LiveUpdateOptions = "" QuorumNotificationRespWaitTime = 0 MaintenanceModeConfig = "" CritDaemonRestartGracePeriod = 60 :cl_cfg_sm_rt[1141] return 0 :cl_cfg_sm_rt[1] on_exit 0 :node_up[441] : exit status of cl_cfg_sm_rt is 0 :node_up[498] : Enable NFS crossmounts during manual start :node_up[500] [[ -n false ]] :node_up[500] [[ false == true ]] :node_up[607] : When RG dependencies are not configured we call node_up_local/remote, :node_up[608] : followed by process_resources to process any remaining groups :node_up[610] [[ TRUE == FALSE ]] :node_up[657] [[ epprda == epprda ]] :node_up[660] : Perform any deferred TCP daemon startup, if necessary, :node_up[661] : along with any necessary start up of iSCSI devices. 
:node_up[663] cl_telinit :cl_telinit[178] version=%I% :cl_telinit[182] TELINIT_FILE=/usr/es/sbin/cluster/.telinit :cl_telinit[183] USE_TELINIT_FILE=/usr/es/sbin/cluster/.use_telinit :cl_telinit[185] [[ -f /usr/es/sbin/cluster/.use_telinit ]] :cl_telinit[189] USE_TELINIT=0 :cl_telinit[198] [[ '' == -boot ]] :cl_telinit[236] cl_lsitab clinit :cl_telinit[236] 1> /dev/null 2>& 1 :cl_telinit[239] : telinit a disabled :cl_telinit[241] return 0 :node_up[664] : exit status of cl_telinit is: 0 :node_up[667] return 0 Jan 28 2023 18:03:36 EVENT COMPLETED: node_up epprda 0 |2023-01-28T18:03:36|8560|EVENT COMPLETED: node_up epprda 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:03:36.185464 + echo '|2023-01-28T18:03:36.185464|INFO: node_up|epprda|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 18:03:38 EVENT START: rg_move_fence epprda 1 |2023-01-28T18:03:38|8561|EVENT START: rg_move_fence epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:03:38.399965 + echo '|2023-01-28T18:03:38.399965|INFO: rg_move_fence|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_fence[62] [[ high == high ]] :rg_move_fence[62] version=1.11 :rg_move_fence[63] NODENAME=epprda :rg_move_fence[63] export NODENAME :rg_move_fence[65] set -u :rg_move_fence[67] [ 2 != 2 ] :rg_move_fence[73] set +u :rg_move_fence[75] [[ -z TRUE ]] :rg_move_fence[80] [[ TRUE == TRUE ]] :rg_move_fence[82] LOCAL_NODENAME=epprda :rg_move_fence[83] odmget -qid=1 HACMPgroup :rg_move_fence[83] egrep 'group =' :rg_move_fence[83] awk '{print $3}' :rg_move_fence[83] eval RGNAME='"epprd_rg"' :rg_move_fence[1] RGNAME=epprd_rg +epprd_rg:rg_move_fence[84] GROUPNAME=epprd_rg +epprd_rg:rg_move_fence[85] group_state='$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[86] set +u +epprd_rg:rg_move_fence[87] eval print '$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[1] print +epprd_rg:rg_move_fence[87] RG_MOVE_ONLINE='' +epprd_rg:rg_move_fence[87] export RG_MOVE_ONLINE +epprd_rg:rg_move_fence[88] set -u +epprd_rg:rg_move_fence[89] RG_MOVE_ONLINE=TMP_ERROR +epprd_rg:rg_move_fence[91] set -a +epprd_rg:rg_move_fence[92] clsetenvgrp epprda rg_move epprd_rg '' :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_fence[92] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[93] RC=0 +epprd_rg:rg_move_fence[94] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_fence[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_fence[3] HOMELESS_GROUPS='' 
+epprd_rg:rg_move_fence[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_fence[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_fence[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_fence[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_fence[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_fence[8] SIBLING_GROUPS='' +epprd_rg:rg_move_fence[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_fence[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_fence[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[95] set +a +epprd_rg:rg_move_fence[96] [ 0 -ne 0 ] +epprd_rg:rg_move_fence[103] process_resources FENCE :rg_move_fence[3318] version=1.169 :rg_move_fence[3321] STATUS=0 :rg_move_fence[3322] sddsrv_off=FALSE :rg_move_fence[3324] true :rg_move_fence[3326] : call rgpa, and it will tell us what to do next :rg_move_fence[3328] set -a :rg_move_fence[3329] clRGPA FENCE :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa FENCE 2023-01-28T18:03:38.503319 clrgpa :clRGPA[+55] exit 0 :rg_move_fence[3329] eval JOB_TYPE=NONE :rg_move_fence[1] JOB_TYPE=NONE :rg_move_fence[3330] RC=0 :rg_move_fence[3331] set +a :rg_move_fence[3333] (( 0 != 0 )) :rg_move_fence[3342] RESOURCE_GROUPS='' :rg_move_fence[3343] GROUPNAME='' :rg_move_fence[3343] export GROUPNAME :rg_move_fence[3353] IS_SERVICE_START=1 :rg_move_fence[3354] IS_SERVICE_STOP=1 :rg_move_fence[3360] [[ NONE == RELEASE ]] :rg_move_fence[3360] [[ NONE == ONLINE ]] :rg_move_fence[3729] break :rg_move_fence[3740] : If sddsrv was turned off above, turn it back on again :rg_move_fence[3742] [[ FALSE == TRUE ]] :rg_move_fence[3747] exit 0 +epprd_rg:rg_move_fence[104] : exit status of process_resources FENCE is: 0 +epprd_rg:rg_move_fence[107] [[ TRUE == TRUE ]] +epprd_rg:rg_move_fence[109] export EVENT_TYPE +epprd_rg:rg_move_fence[110] echo ACQUIRE_PRIMARY ACQUIRE_PRIMARY +epprd_rg:rg_move_fence[111] [[ -n '' ]] +epprd_rg:rg_move_fence[141] exit 0 Jan 28 2023 18:03:38 EVENT COMPLETED: rg_move_fence epprda 1 0 |2023-01-28T18:03:38|8561|EVENT COMPLETED: rg_move_fence epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:03:38.595696 + echo '|2023-01-28T18:03:38.595696|INFO: rg_move_fence|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 18:03:38 EVENT START: rg_move_acquire epprda 1 |2023-01-28T18:03:38|8561|EVENT START: rg_move_acquire epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:03:38.789540 + echo '|2023-01-28T18:03:38.789540|INFO: rg_move_acquire|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_acquire[+54] [[ high == high ]] :rg_move_acquire[+54] version=1.9.1.7 :rg_move_acquire[+57] set -u :rg_move_acquire[+59] [ 2 != 2 ] :rg_move_acquire[+65] set +u :rg_move_acquire[+67] :rg_move_acquire[+67] clodmget -n -q id=1 -f group HACMPgroup RG=epprd_rg :rg_move_acquire[+68] export RG :rg_move_acquire[+70] [[ ACQUIRE_PRIMARY == ACQUIRE_PRIMARY ]] :rg_move_acquire[+75] typeset -i anhp_ret=0 :rg_move_acquire[+76] typeset -i scsi_ret=0 :rg_move_acquire[+78] clodmget -n -q policy = anhp -f value HACMPsplitmerge :rg_move_acquire[+78] typeset ANHP_ENABLED= :rg_move_acquire[+78] [[ == Yes ]] :rg_move_acquire[+87] clodmget -n -q policy = scsi -f value HACMPsplitmerge :rg_move_acquire[+87] typeset SCSIPR_ENABLED= 
:rg_move_acquire[+87] [[ == Yes ]] :rg_move_acquire[+106] (( 0 == 1 && 0 == 1 )) :rg_move_acquire[+109] (( 0 == 1 && 0 == 0 )) :rg_move_acquire[+112] (( 0 == 1 && 0 == 0 )) :rg_move_acquire[+118] clcallev rg_move epprda 1 ACQUIRE Jan 28 2023 18:03:38 EVENT START: rg_move epprda 1 ACQUIRE |2023-01-28T18:03:38|8561|EVENT START: rg_move epprda 1 ACQUIRE| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T18:03:38.920496 :clevlog[amlog_trace:320] echo '|2023-01-28T18:03:38.920496|INFO: rg_move|epprd_rg|epprda|1|ACQUIRE' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. :get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move[76] version=%I% :rg_move[86] STATUS=0 :rg_move[88] [[ ! -n '' ]] :rg_move[90] EMULATE=REAL :rg_move[96] set -u :rg_move[98] NODENAME=epprda :rg_move[98] export NODENAME :rg_move[99] RGID=1 :rg_move[100] (( 3 == 3 )) :rg_move[102] ACTION=ACQUIRE :rg_move[108] : serial number for this event is 8561 :rg_move[112] RG_UP_POSTEVENT_ON_NODE=epprda :rg_move[112] export RG_UP_POSTEVENT_ON_NODE :rg_move[116] clodmget -qid=1 -f group -n HACMPgroup :rg_move[116] eval RGNAME=epprd_rg :rg_move[1] RGNAME=epprd_rg :rg_move[118] UPDATESTATD=0 :rg_move[119] export UPDATESTATD :rg_move[123] RG_MOVE_EVENT=true :rg_move[123] export RG_MOVE_EVENT :rg_move[128] group_state='$RESGRP_epprd_rg_epprda' :rg_move[129] set +u :rg_move[130] eval print '$RESGRP_epprd_rg_epprda' :rg_move[1] print :rg_move[130] RG_MOVE_ONLINE='' :rg_move[130] export RG_MOVE_ONLINE :rg_move[131] set -u :rg_move[132] RG_MOVE_ONLINE=TMP_ERROR :rg_move[139] rm -f /tmp/.NFSSTOPPED :rg_move[140] rm -f /tmp/.RPCLOCKDSTOPPED :rg_move[147] set -a :rg_move[148] clsetenvgrp epprda rg_move epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 :rg_move[148] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[149] RC=0 :rg_move[150] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" 
SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[1] FORCEDOWN_GROUPS='' :rg_move[2] RESOURCE_GROUPS='' :rg_move[3] HOMELESS_GROUPS='' :rg_move[4] HOMELESS_FOLLOWER_GROUPS='' :rg_move[5] ERRSTATE_GROUPS='' :rg_move[6] PRINCIPAL_ACTIONS='' :rg_move[7] ASSOCIATE_ACTIONS='' :rg_move[8] AUXILLIARY_ACTIONS='' :rg_move[8] SIBLING_GROUPS='' :rg_move[9] SIBLING_NODES_BY_GROUP='' :rg_move[10] SIBLING_ACQUIRING_GROUPS='' :rg_move[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' :rg_move[12] SIBLING_RELEASING_GROUPS='' :rg_move[13] SIBLING_RELEASING_NODES_BY_GROUP='' :rg_move[151] set +a :rg_move[155] (( 0 != 0 )) :rg_move[155] [[ -z epprd_rg ]] :rg_move[164] [[ -z TRUE ]] :rg_move[241] AM_SYNC_CALLED_BY=RG_MOVE :rg_move[241] export AM_SYNC_CALLED_BY :rg_move[242] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-01-28T18:03:39.040868 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=ACQUIRE RESOURCE_GROUPS='"epprd_rg"' PRINCIPAL_ACTION='"ACQUIRE"' AUXILLIARY_ACTION='"NONE"' :process_resources[1] JOB_TYPE=ACQUIRE :process_resources[1] RESOURCE_GROUPS=epprd_rg :process_resources[1] PRINCIPAL_ACTION=ACQUIRE :process_resources[1] AUXILLIARY_ACTION=NONE :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 )) :process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[3360] [[ ACQUIRE == ONLINE ]] +epprd_rg:process_resources[3652] set_resource_group_state ACQUIRING +epprd_rg:process_resources[set_resource_group_state:82] PS4_FUNC=set_resource_group_state +epprd_rg:process_resources[set_resource_group_state:82] typeset PS4_FUNC +epprd_rg:process_resources[set_resource_group_state:83] [[ high == high ]] +epprd_rg:process_resources[set_resource_group_state:83] set -x +epprd_rg:process_resources[set_resource_group_state:84] STAT=0 +epprd_rg:process_resources[set_resource_group_state:85] new_status=ACQUIRING +epprd_rg:process_resources[set_resource_group_state:89] export GROUPNAME +epprd_rg:process_resources[set_resource_group_state:90] [[ ACQUIRING != DOWN ]] +epprd_rg:process_resources[set_resource_group_state:92] clchdaemons -d clstrmgr_scripts -t resource_locator -n epprda -o epprd_rg -v ACQUIRING +epprd_rg:process_resources[set_resource_group_state:100] : Resource Manager Updates +epprd_rg:process_resources[set_resource_group_state:105] amlog_trace '' 'acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:318] clcycle clavailability.log +epprd_rg:process_resources[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:process_resources[amlog_trace:319] cltime +epprd_rg:process_resources[amlog_trace:319] DATE=2023-01-28T18:03:39.075279 +epprd_rg:process_resources[amlog_trace:320] echo 
'|2023-01-28T18:03:39.075279|INFO: acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:process_resources[set_resource_group_state:106] cl_RMupdate acquiring epprd_rg process_resources 2023-01-28T18:03:39.099108 2023-01-28T18:03:39.103646 +epprd_rg:process_resources[set_resource_group_state:153] return 0 +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:03:39.115492 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=WPAR ACTION=ACQUIRE RESOURCE_GROUPS='"epprd_rg' '"' +epprd_rg:process_resources[1] JOB_TYPE=WPAR +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ WPAR == RELEASE ]] +epprd_rg:process_resources[3360] [[ WPAR == ONLINE ]] +epprd_rg:process_resources[3492] process_wpars ACQUIRE +epprd_rg:process_resources[process_wpars:3265] PS4_FUNC=process_wpars +epprd_rg:process_resources[process_wpars:3265] typeset PS4_FUNC +epprd_rg:process_resources[process_wpars:3266] [[ high == high ]] +epprd_rg:process_resources[process_wpars:3266] set -x +epprd_rg:process_resources[process_wpars:3267] STAT=0 +epprd_rg:process_resources[process_wpars:3268] action=ACQUIRE +epprd_rg:process_resources[process_wpars:3268] typeset action +epprd_rg:process_resources[process_wpars:3272] export GROUPNAME +epprd_rg:process_resources[process_wpars:3275] clstart_wpar +epprd_rg:clstart_wpar[180] version=1.12.1.1 +epprd_rg:clstart_wpar[184] [[ rg_move == reconfig_resource_acquire ]] +epprd_rg:clstart_wpar[184] [[ ACQUIRE_PRIMARY == reconfig_resource_acquire ]] +epprd_rg:clstart_wpar[193] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clstart_wpar[193] [[ -z '' ]] +epprd_rg:clstart_wpar[193] exit 0 +epprd_rg:process_resources[process_wpars:3276] RC=0 +epprd_rg:process_resources[process_wpars:3285] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[process_wpars:3294] return 0 +epprd_rg:process_resources[3493] RC=0 +epprd_rg:process_resources[3495] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:03:39.145721 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=SERVICE_LABELS ACTION=ACQUIRE IP_LABELS='"epprd"' RESOURCE_GROUPS='"epprd_rg' '"' COMMUNICATION_LINKS='""' +epprd_rg:process_resources[1] 
JOB_TYPE=SERVICE_LABELS +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] IP_LABELS=epprd +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] COMMUNICATION_LINKS='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ SERVICE_LABELS == RELEASE ]] +epprd_rg:process_resources[3360] [[ SERVICE_LABELS == ONLINE ]] +epprd_rg:process_resources[3407] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[3409] acquire_service_labels +epprd_rg:process_resources[acquire_service_labels:3083] PS4_FUNC=acquire_service_labels +epprd_rg:process_resources[acquire_service_labels:3083] typeset PS4_FUNC +epprd_rg:process_resources[acquire_service_labels:3084] [[ high == high ]] +epprd_rg:process_resources[acquire_service_labels:3084] set -x +epprd_rg:process_resources[acquire_service_labels:3085] STAT=0 +epprd_rg:process_resources[acquire_service_labels:3086] clcallev acquire_service_addr Jan 28 2023 18:03:39 EVENT START: acquire_service_addr |2023-01-28T18:03:39|8561|EVENT START: acquire_service_addr | +epprd_rg:acquire_service_addr[416] version=1.74.1.5 +epprd_rg:acquire_service_addr[423] [[ SERVICE_LABELS != 0 ]] +epprd_rg:acquire_service_addr[423] [[ SERVICE_LABELS != GROUP ]] +epprd_rg:acquire_service_addr[424] PROC_RES=true +epprd_rg:acquire_service_addr[440] saveNSORDER=UNDEFINED +epprd_rg:acquire_service_addr[441] NSORDER=local +epprd_rg:acquire_service_addr[442] export NSORDER +epprd_rg:acquire_service_addr[445] cl_RMupdate resource_acquiring All_service_addrs acquire_service_addr 2023-01-28T18:03:39.227529 2023-01-28T18:03:39.231726 +epprd_rg:acquire_service_addr[452] export GROUPNAME +epprd_rg:acquire_service_addr[458] [[ true == true ]] +epprd_rg:acquire_service_addr[459] get_list_head epprd +epprd_rg:acquire_service_addr[459] read SERVICELABELS +epprd_rg:acquire_service_addr[460] get_list_tail epprd +epprd_rg:acquire_service_addr[460] read IP_LABELS +epprd_rg:acquire_service_addr[471] clgetif -a epprd +epprd_rg:acquire_service_addr[471] 2> /dev/null +epprd_rg:acquire_service_addr[472] (( 3 != 0 )) +epprd_rg:acquire_service_addr[477] cllsif -J '~' -Sn epprd +epprd_rg:acquire_service_addr[477] cut -d~ -f3 +epprd_rg:acquire_service_addr[477] uniq +epprd_rg:acquire_service_addr[477] NETWORK=net_ether_01 +epprd_rg:acquire_service_addr[478] cllsif -J '~' -Si epprda +epprd_rg:acquire_service_addr[478] awk -F~ -v NET=net_ether_01 '{if ($2 == "boot" && $3 == NET) print $1}' +epprd_rg:acquire_service_addr[478] sort +epprd_rg:acquire_service_addr[478] boot_list=epprda +epprd_rg:acquire_service_addr[480] [[ -z epprda ]] +epprd_rg:acquire_service_addr[492] best_boot_addr net_ether_01 epprda +epprd_rg:acquire_service_addr[best_boot_addr:106] NETWORK=net_ether_01 +epprd_rg:acquire_service_addr[best_boot_addr:106] typeset NETWORK +epprd_rg:acquire_service_addr[best_boot_addr:107] shift +epprd_rg:acquire_service_addr[best_boot_addr:108] candidate_boots=epprda +epprd_rg:acquire_service_addr[best_boot_addr:108] typeset candidate_boots +epprd_rg:acquire_service_addr[best_boot_addr:112] echo epprda +epprd_rg:acquire_service_addr[best_boot_addr:112] wc -l 
+epprd_rg:acquire_service_addr[best_boot_addr:112] tr ' ' '\n' +epprd_rg:acquire_service_addr[best_boot_addr:112] num_candidates=' 1' +epprd_rg:acquire_service_addr[best_boot_addr:112] typeset -li num_candidates +epprd_rg:acquire_service_addr[best_boot_addr:113] (( 1 == 1 )) +epprd_rg:acquire_service_addr[best_boot_addr:114] echo epprda +epprd_rg:acquire_service_addr[best_boot_addr:115] return +epprd_rg:acquire_service_addr[492] boot_addr=epprda +epprd_rg:acquire_service_addr[493] (( 0 != 0 )) +epprd_rg:acquire_service_addr[505] cut -f1 +epprd_rg:acquire_service_addr[505] clgetif -a epprda +epprd_rg:acquire_service_addr[505] 2> /dev/null +epprd_rg:acquire_service_addr[505] INTERFACE='en0 ' +epprd_rg:acquire_service_addr[507] cllsif -J '~' -Sn epprda +epprd_rg:acquire_service_addr[507] cut -f7,9 -d~ +epprd_rg:acquire_service_addr[508] read boot_dot_addr INTERFACE +epprd_rg:acquire_service_addr[508] IFS='~' +epprd_rg:acquire_service_addr[510] [[ -z en0 ]] +epprd_rg:acquire_service_addr[527] cllsif -J '~' -Sn epprd +epprd_rg:acquire_service_addr[527] cut -f7,11,15 -d~ +epprd_rg:acquire_service_addr[527] uniq +epprd_rg:acquire_service_addr[528] read service_dot_addr NETMASK INET_FAMILY +epprd_rg:acquire_service_addr[528] IFS='~' +epprd_rg:acquire_service_addr[530] [[ AF_INET == AF_INET6 ]] +epprd_rg:acquire_service_addr[534] cl_swap_IP_address rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[462] version=1.9.14.8 +epprd_rg:cl_swap_IP_address[464] cl_get_path -S +epprd_rg:cl_swap_IP_address[464] OP_SEP='~' +epprd_rg:cl_swap_IP_address[465] LC_ALL=C +epprd_rg:cl_swap_IP_address[465] export LC_ALL +epprd_rg:cl_swap_IP_address[466] RESTORE_ROUTES=/usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[468] cl_echo 33 'Starting execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0' /usr/es/sbin/cluster/events/utils/cl_swap_IP_address 'rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0' Jan 28 2023 18:03:39Starting execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0+epprd_rg:cl_swap_IP_address[470] typeset -i oslevel +epprd_rg:cl_swap_IP_address[471] /usr/bin/sed s/-//g +epprd_rg:cl_swap_IP_address[471] /usr/bin/oslevel -r +epprd_rg:cl_swap_IP_address[471] oslevel=720005 +epprd_rg:cl_swap_IP_address[476] [[ 6 == 6 ]] +epprd_rg:cl_swap_IP_address[477] [[ 6 == 7 ]] +epprd_rg:cl_swap_IP_address[484] no -a +epprd_rg:cl_swap_IP_address[484] awk '{ print $3 }' +epprd_rg:cl_swap_IP_address[484] grep ipignoreredirects +epprd_rg:cl_swap_IP_address[484] PRIOR_IPIGNORE_REDIRECTS_VALUE=0 +epprd_rg:cl_swap_IP_address[485] /usr/sbin/no -o ipignoreredirects=1 Setting ipignoreredirects to 1 +epprd_rg:cl_swap_IP_address[490] PROC_RES=false +epprd_rg:cl_swap_IP_address[491] [[ SERVICE_LABELS != 0 ]] +epprd_rg:cl_swap_IP_address[491] [[ SERVICE_LABELS != GROUP ]] +epprd_rg:cl_swap_IP_address[492] PROC_RES=true +epprd_rg:cl_swap_IP_address[495] set -u +epprd_rg:cl_swap_IP_address[497] RC=0 +epprd_rg:cl_swap_IP_address[504] netstat -in
Name Mtu   Network   Address           Ipkts     Ierrs Opkts     Oerrs Coll
en0  1500  link#2    fa.e6.13.4e.a9.20 183743545 0     60757861  0     0
en0  1500  61.81.244 61.81.244.134     183743545 0     60757861  0     0
lo0  16896 link#1                      34271505  0     34271505  0     0
lo0  16896 127       127.0.0.1         34271505  0     34271505  0     0
lo0  16896 ::1%1                       34271505  0     34271505  0     0
+epprd_rg:cl_swap_IP_address[505] netstat -rnC
Routing tables
Destination      Gateway        Flags  Wt  Policy  If   Cost  Config_Cost

Route tree for Protocol Family 2 (Internet):
default          61.81.244.1    UG     1   -       en0  0     0
61.81.244.0      61.81.244.134  UHSb   1   -       en0  0     0  =>
61.81.244/24     61.81.244.134  U      1   -       en0  0     0
61.81.244.134    127.0.0.1      UGHS   1   -       lo0  0     0
61.81.244.255    61.81.244.134  UHSb   1   -       en0  0     0
127/8            127.0.0.1      U      1   -       lo0  0     0

Route tree for Protocol Family 24 (Internet v6):
::1%1            ::1%1          UH     1   -       lo0  0     0
+epprd_rg:cl_swap_IP_address[506] CASC_OR_ROT=rotating +epprd_rg:cl_swap_IP_address[507] ACQ_OR_RLSE=acquire +epprd_rg:cl_swap_IP_address[508] IF=en0 +epprd_rg:cl_swap_IP_address[509] ADDR=61.81.244.156 +epprd_rg:cl_swap_IP_address[510] OLD_ADDR=61.81.244.134 +epprd_rg:cl_swap_IP_address[511] NETMASK=255.255.255.0 +epprd_rg:cl_swap_IP_address[514] [[ rotating == cascading ]] +epprd_rg:cl_swap_IP_address[525] cut -f3 -d~ +epprd_rg:cl_swap_IP_address[525] cllsif -J '~' -Sw -n 61.81.244.156 +epprd_rg:cl_swap_IP_address[525] NET=net_ether_01 +epprd_rg:cl_swap_IP_address[528] clodmget -qidentifier=61.81.244.156 -f max_aliases -n HACMPadapter +epprd_rg:cl_swap_IP_address[528] ALIAS_FIRST=0 +epprd_rg:cl_swap_IP_address[529] grep -c -w inet +epprd_rg:cl_swap_IP_address[529] ifconfig en0 +epprd_rg:cl_swap_IP_address[529] LC_ALL=C +epprd_rg:cl_swap_IP_address[529] NUM_ADDRS=1 +epprd_rg:cl_swap_IP_address[530] [[ acquire == acquire ]] +epprd_rg:cl_swap_IP_address[533] amlog_trace '' 'Aliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_swap_IP_address[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_swap_IP_address[amlog_trace:319] cltime +epprd_rg:cl_swap_IP_address[amlog_trace:319] DATE=2023-01-28T18:03:39.464817 +epprd_rg:cl_swap_IP_address[amlog_trace:320] echo '|2023-01-28T18:03:39.464817|INFO: Aliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_swap_IP_address[535] cl_echo 7310 'cl_swap_IP_address: Configuring network interface en0 with aliased IP address 61.81.244.156' cl_swap_IP_address en0 61.81.244.156 Jan 28 2023 18:03:39cl_swap_IP_address: Configuring network interface en0 with aliased IP address 61.81.244.156+epprd_rg:cl_swap_IP_address[546] (( 1 > 1 )) +epprd_rg:cl_swap_IP_address[550] clifconfig en0 alias 61.81.244.156 netmask 255.255.255.0 firstalias +epprd_rg:clifconfig[117] version=1.9 +epprd_rg:clifconfig[121] set -A args en0 alias 61.81.244.156 netmask 255.255.255.0 firstalias +epprd_rg:clifconfig[124] interface=en0 +epprd_rg:clifconfig[125] shift +epprd_rg:clifconfig[127] [[ -n alias ]] +epprd_rg:clifconfig[129] alias_val=1 +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n 61.81.244.156 ]] +epprd_rg:clifconfig[147] params=' address=61.81.244.156' +epprd_rg:clifconfig[147] addr=61.81.244.156 +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n netmask ]] +epprd_rg:clifconfig[149] params=' address=61.81.244.156 netmask=255.255.255.0' +epprd_rg:clifconfig[149] shift +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n firstalias ]] +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n '' ]] +epprd_rg:clifconfig[174] [[ -n 1 ]] +epprd_rg:clifconfig[174] [[ -n epprd_rg ]] +epprd_rg:clifconfig[175] clwparname epprd_rg +epprd_rg:clwparname[38] version=1.3.1.1 +epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clwparname[44] [[ -z '' ]] +epprd_rg:clwparname[44] exit 0 +epprd_rg:clifconfig[175] WPARNAME=''
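The exchange above is IP address takeover via aliasing in miniature: the service address is layered onto the boot interface as an additional alias rather than replacing the boot address. A minimal standalone sketch of the same step, using the interface and addresses from this log (illustrative only, not the PowerHA source):

    # Add the service IP as the primary ("first") alias on the boot interface;
    # the boot address 61.81.244.134 remains configured beneath it.
    ifconfig en0 alias 61.81.244.156 netmask 255.255.255.0 firstalias

    # Confirm both addresses are now present on en0.
    ifconfig en0 | grep -c -w inet

clifconfig itself is a thin wrapper: it would redirect the operation into a WPAR if the resource group carried a WPAR_NAME resource, but with none defined it falls through to plain ifconfig, as the next lines show. Once the alias is up, flush_arp deletes cached ARP entries so that peers re-learn the address-to-MAC bindings.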
+epprd_rg:clifconfig[176] (( 0 == 0 )) +epprd_rg:clifconfig[176] [[ -n '' ]] +epprd_rg:clifconfig[218] belongs_to_an_active_wpar 61.81.244.156 +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]] +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1 +epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast +epprd_rg:clifconfig[218] IFS='~' +epprd_rg:clifconfig[219] rc=1 +epprd_rg:clifconfig[221] [[ 1 == 0 ]] +epprd_rg:clifconfig[275] ifconfig en0 alias 61.81.244.156 netmask 255.255.255.0 firstalias +epprd_rg:cl_swap_IP_address[584] hats_adapter_notify en0 -e 61.81.244.156 alias 2023-01-28T18:03:39.516659 hats_adapter_notify 2023-01-28T18:03:39.517619 hats_adapter_notify +epprd_rg:cl_swap_IP_address[587] check_alias_status en0 61.81.244.156 acquire +epprd_rg:cl_swap_IP_address[check_alias_status:108] CH_INTERFACE=en0 +epprd_rg:cl_swap_IP_address[check_alias_status:109] CH_ADDRESS=61.81.244.156 +epprd_rg:cl_swap_IP_address[check_alias_status:110] CH_ACQ_OR_RLSE=acquire +epprd_rg:cl_swap_IP_address[check_alias_status:118] IF_IB=en0 +epprd_rg:cl_swap_IP_address[check_alias_status:120] awk '{print index($0, "ib")}' +epprd_rg:cl_swap_IP_address[check_alias_status:120] echo en0 +epprd_rg:cl_swap_IP_address[check_alias_status:120] IS_IB=0 +epprd_rg:cl_swap_IP_address[check_alias_status:122] [[ 0 != 1 ]] +epprd_rg:cl_swap_IP_address[check_alias_status:124] clifconfig en0 +epprd_rg:cl_swap_IP_address[check_alias_status:124] fgrep -w 61.81.244.156 +epprd_rg:cl_swap_IP_address[check_alias_status:124] awk '{print $2}' +epprd_rg:clifconfig[117] version=1.9 +epprd_rg:clifconfig[121] set -A args en0 +epprd_rg:clifconfig[124] interface=en0 +epprd_rg:clifconfig[125] shift +epprd_rg:clifconfig[127] [[ -n '' ]] +epprd_rg:clifconfig[174] [[ -n '' ]] +epprd_rg:clifconfig[218] belongs_to_an_active_wpar +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]] +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1 +epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast +epprd_rg:clifconfig[218] IFS='~' +epprd_rg:clifconfig[219] rc=1 +epprd_rg:clifconfig[221] [[ 1 == 0 ]] +epprd_rg:clifconfig[275] ifconfig en0 +epprd_rg:cl_swap_IP_address[check_alias_status:124] ADDR=61.81.244.156 +epprd_rg:cl_swap_IP_address[check_alias_status:129] [ acquire = acquire ] +epprd_rg:cl_swap_IP_address[check_alias_status:133] [[ 61.81.244.156 != 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[check_alias_status:144] return 0 +epprd_rg:cl_swap_IP_address[588] RC=0 +epprd_rg:cl_swap_IP_address[590] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[594] amlog_trace '' 'Aliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_swap_IP_address[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_swap_IP_address[amlog_trace:319] cltime +epprd_rg:cl_swap_IP_address[amlog_trace:319] DATE=2023-01-28T18:03:39.571690 +epprd_rg:cl_swap_IP_address[amlog_trace:320] echo '|2023-01-28T18:03:39.571690|INFO: Aliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_swap_IP_address[701] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[714] flush_arp +epprd_rg:cl_swap_IP_address[flush_arp:49] arp -an +epprd_rg:cl_swap_IP_address[flush_arp:49] grep '\?' 
+epprd_rg:cl_swap_IP_address[flush_arp:49] tr -d '()' +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.27 61.81.244.27 (61.81.244.27) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.220 61.81.244.220 (61.81.244.220) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.221 61.81.244.221 (61.81.244.221) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.224 61.81.244.224 (61.81.244.224) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.239 61.81.244.239 (61.81.244.239) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.123 61.81.244.123 (61.81.244.123) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.146 61.81.244.146 (61.81.244.146) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.1 61.81.244.1 (61.81.244.1) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.156 61.81.244.156 (61.81.244.156) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:52] return 0 +epprd_rg:cl_swap_IP_address[716] netstat -in
Name Mtu   Network   Address           Ipkts     Ierrs Opkts     Oerrs Coll
en0  1500  link#2    fa.e6.13.4e.a9.20 183743638 0     60757986  0     0
en0  1500  61.81.244 61.81.244.156     183743638 0     60757986  0     0
en0  1500  61.81.244 61.81.244.134     183743638 0     60757986  0     0
lo0  16896 link#1                      34271517  0     34271517  0     0
lo0  16896 127       127.0.0.1         34271517  0     34271517  0     0
lo0  16896 ::1%1                       34271517  0     34271517  0     0
+epprd_rg:cl_swap_IP_address[717] netstat -rnC
Routing tables
Destination      Gateway        Flags  Wt  Policy  If   Cost  Config_Cost

Route tree for Protocol Family 2 (Internet):
default          61.81.244.1    UG     1   -       en0  0     0
61.81.244.0      61.81.244.156  UHSb   1   -       en0  0     0  =>
61.81.244/24     61.81.244.156  U      1   -       en0  0     0
61.81.244.134    127.0.0.1      UGHS   1   -       lo0  0     0
61.81.244.156    127.0.0.1      UGHS   1   -       lo0  0     0
61.81.244.255    61.81.244.156  UHSb   1   -       en0  0     0
127/8            127.0.0.1      U      1   -       lo0  0     0

Route tree for Protocol Family 24 (Internet v6):
::1%1            ::1%1          UH     1   -       lo0  0     0
+epprd_rg:cl_swap_IP_address[989] no -o ipignoreredirects=0 Setting ipignoreredirects to 0 +epprd_rg:cl_swap_IP_address[992] cl_echo 32 'Completed execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0. Exit status = 0' /usr/es/sbin/cluster/events/utils/cl_swap_IP_address 'rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0' 0 Jan 28 2023 18:03:39Completed execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0.
Exit status = 0+epprd_rg:cl_swap_IP_address[994] date Sat Jan 28 18:03:39 KORST 2023 +epprd_rg:cl_swap_IP_address[996] exit 0 +epprd_rg:acquire_service_addr[537] RC=0 +epprd_rg:acquire_service_addr[539] (( 0 != 0 )) +epprd_rg:acquire_service_addr[549] [[ true == false ]] +epprd_rg:acquire_service_addr[560] cl_RMupdate resource_up All_nonerror_service_addrs acquire_service_addr 2023-01-28T18:03:39.649697 2023-01-28T18:03:39.654111 +epprd_rg:acquire_service_addr[565] [[ UNDEFINED != UNDEFINED ]] +epprd_rg:acquire_service_addr[568] NSORDER='' +epprd_rg:acquire_service_addr[568] export NSORDER +epprd_rg:acquire_service_addr[571] [[ true == false ]] +epprd_rg:acquire_service_addr[579] exit 0 Jan 28 2023 18:03:39 EVENT COMPLETED: acquire_service_addr 0 |2023-01-28T18:03:39|8561|EVENT COMPLETED: acquire_service_addr 0| +epprd_rg:process_resources[acquire_service_labels:3087] RC=0 +epprd_rg:process_resources[acquire_service_labels:3089] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[acquire_service_labels:3104] (( 0 != 0 )) +epprd_rg:process_resources[acquire_service_labels:3110] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. +epprd_rg:process_resources[acquire_service_labels:3112] return 0 +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:03:39.729540 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=DISKS ACTION=ACQUIRE HDISKS='"hdisk2,hdisk3,hdisk4,hdisk5,hdisk6,hdisk7,hdisk8"' RESOURCE_GROUPS='"epprd_rg' '"' VOLUME_GROUPS='"datavg,datavg,datavg,datavg,datavg,datavg,datavg"' +epprd_rg:process_resources[1] JOB_TYPE=DISKS +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] HDISKS=hdisk2,hdisk3,hdisk4,hdisk5,hdisk6,hdisk7,hdisk8 +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] VOLUME_GROUPS=datavg,datavg,datavg,datavg,datavg,datavg,datavg +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ DISKS == RELEASE ]] +epprd_rg:process_resources[3360] [[ DISKS == ONLINE ]] +epprd_rg:process_resources[3439] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[3441] FAILED_RR_RGS='' +epprd_rg:process_resources[3442] get_disks_main +epprd_rg:process_resources[get_disks_main:981] PS4_FUNC=get_disks_main +epprd_rg:process_resources[get_disks_main:981] typeset PS4_FUNC +epprd_rg:process_resources[get_disks_main:982] [[ high == high ]] +epprd_rg:process_resources[get_disks_main:982] set -x +epprd_rg:process_resources[get_disks_main:983] SKIPBRKRES=0 +epprd_rg:process_resources[get_disks_main:983] typeset -li SKIPBRKRES +epprd_rg:process_resources[get_disks_main:984] STAT=0 +epprd_rg:process_resources[get_disks_main:985] FAILURE_IN_METHOD=0 +epprd_rg:process_resources[get_disks_main:985] typeset -li FAILURE_IN_METHOD 
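Every phase of this event follows the dispatch pattern just traced: clRGPA emits a shell-assignable job description, process_resources evals it under set -a so the assignments (JOB_TYPE, ACTION, HDISKS, VOLUME_GROUPS, and so on) become environment variables, and then branches on JOB_TYPE. A simplified sketch of that driver loop, reconstructed from the trace (the NONE terminator is an assumption; this portion of the log does not reach the end of the event):

    while true; do
        set -a                      # auto-export whatever the eval assigns
        job=$(clRGPA)               # e.g. JOB_TYPE=DISKS ACTION=ACQUIRE HDISKS="..."
        eval $job
        set +a
        [[ $JOB_TYPE == NONE ]] && break      # assumed end-of-work marker
        case $JOB_TYPE in
            SERVICE_LABELS) acquire_service_labels ;;       # traced above
            DISKS)          get_disks_main ;;               # traced below
            VGS)            process_volume_groups_main ;;
        esac
    done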
+epprd_rg:process_resources[get_disks_main:986] LIST_OF_FAILED_RGS='' +epprd_rg:process_resources[get_disks_main:989] : Below are the list of resources as generated by clrgpa +epprd_rg:process_resources[get_disks_main:991] RG_LIST=epprd_rg +epprd_rg:process_resources[get_disks_main:992] RDISK_LIST='' +epprd_rg:process_resources[get_disks_main:993] DISK_LIST=hdisk2,hdisk3,hdisk4,hdisk5,hdisk6,hdisk7,hdisk8 +epprd_rg:process_resources[get_disks_main:994] VG_LIST=datavg,datavg,datavg,datavg,datavg,datavg,datavg +epprd_rg:process_resources[get_disks_main:997] : Resource groups are processed individually. This is required because +epprd_rg:process_resources[get_disks_main:998] : the replication mechanism may differ between resource groups. +epprd_rg:process_resources[get_disks_main:1002] getReplicatedResources epprd_rg +epprd_rg:process_resources[getReplicatedResources:699] PS4_FUNC=getReplicatedResources +epprd_rg:process_resources[getReplicatedResources:699] typeset PS4_FUNC +epprd_rg:process_resources[getReplicatedResources:700] [[ high == high ]] +epprd_rg:process_resources[getReplicatedResources:700] set -x +epprd_rg:process_resources[getReplicatedResources:702] RV=false +epprd_rg:process_resources[getReplicatedResources:704] clodmget -n -f type HACMPrresmethods +epprd_rg:process_resources[getReplicatedResources:704] [[ -n 9 ]] +epprd_rg:process_resources[getReplicatedResources:707] : Replicated resource methods are defined, check for resources +epprd_rg:process_resources[getReplicatedResources:709] clodmget -q $'name like \'*_REP_RESOURCE\' AND group=epprd_rg' -f value -n HACMPresource +epprd_rg:process_resources[getReplicatedResources:709] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:718] : Verify if any backup profiles are configured and trigger cbm utilities based on that +epprd_rg:process_resources[getReplicatedResources:720] clodmget -q name=BACKUP_ENABLED -f value HACMPresource +epprd_rg:process_resources[getReplicatedResources:720] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:739] echo false +epprd_rg:process_resources[get_disks_main:1002] REPLICATED_RESOURCES=false +epprd_rg:process_resources[get_disks_main:1005] : Break out the resources for resource group epprd_rg +epprd_rg:process_resources[get_disks_main:1007] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[get_disks_main:1008] VOLUME_GROUPS='' +epprd_rg:process_resources[get_disks_main:1009] HDISKS='' +epprd_rg:process_resources[get_disks_main:1010] RHDISKS='' +epprd_rg:process_resources[get_disks_main:1011] RDISK_LIST='' +epprd_rg:process_resources[get_disks_main:1014] : Get the volume groups in resource group epprd_rg +epprd_rg:process_resources[get_disks_main:1016] print datavg,datavg,datavg,datavg,datavg,datavg,datavg +epprd_rg:process_resources[get_disks_main:1016] read VOLUME_GROUPS VG_LIST +epprd_rg:process_resources[get_disks_main:1016] IFS=: +epprd_rg:process_resources[get_disks_main:1018] : Removing duplicate entries in VG list. 
+epprd_rg:process_resources[get_disks_main:1020] echo datavg,datavg,datavg,datavg,datavg,datavg,datavg +epprd_rg:process_resources[get_disks_main:1020] tr , '\n' +epprd_rg:process_resources[get_disks_main:1020] xargs +epprd_rg:process_resources[get_disks_main:1020] sort -u +epprd_rg:process_resources[get_disks_main:1020] VOLUME_GROUPS=datavg +epprd_rg:process_resources[get_disks_main:1022] : Get the disks corresponding to these volume groups +epprd_rg:process_resources[get_disks_main:1024] print hdisk2,hdisk3,hdisk4,hdisk5,hdisk6,hdisk7,hdisk8 +epprd_rg:process_resources[get_disks_main:1024] read HDISKS DISK_LIST +epprd_rg:process_resources[get_disks_main:1024] IFS=: +epprd_rg:process_resources[get_disks_main:1025] HDISKS='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8' +epprd_rg:process_resources[get_disks_main:1031] : Pick up any raw disks not returned by clrgpa +epprd_rg:process_resources[get_disks_main:1033] clodmget -q group='epprd_rg AND name=RAW_DISK' HACMPresource +epprd_rg:process_resources[get_disks_main:1033] [[ -n '' ]] +epprd_rg:process_resources[get_disks_main:1042] : Get any raw disks in resource group epprd_rg +epprd_rg:process_resources[get_disks_main:1045] print +epprd_rg:process_resources[get_disks_main:1045] read RHDISKS RDISK_LIST +epprd_rg:process_resources[get_disks_main:1045] IFS=: +epprd_rg:process_resources[get_disks_main:1046] RHDISKS='' +epprd_rg:process_resources[get_disks_main:1047] print datavg +epprd_rg:process_resources[get_disks_main:1047] read VOLUME_GROUPS +epprd_rg:process_resources[get_disks_main:1051] : At this point, the global variables below should be set to +epprd_rg:process_resources[get_disks_main:1052] : the values associated with resource group epprd_rg +epprd_rg:process_resources[get_disks_main:1054] export RESOURCE_GROUPS +epprd_rg:process_resources[get_disks_main:1055] export VOLUME_GROUPS +epprd_rg:process_resources[get_disks_main:1056] export HDISKS +epprd_rg:process_resources[get_disks_main:1057] export RHDISKS +epprd_rg:process_resources[get_disks_main:1059] [[ false == true ]] +epprd_rg:process_resources[get_disks_main:1182] get_disks +epprd_rg:process_resources[get_disks:1198] PS4_FUNC=get_disks +epprd_rg:process_resources[get_disks:1198] typeset PS4_FUNC +epprd_rg:process_resources[get_disks:1199] [[ high == high ]] +epprd_rg:process_resources[get_disks:1199] set -x +epprd_rg:process_resources[get_disks:1201] STAT=0 +epprd_rg:process_resources[get_disks:1204] : Most volume groups are Enhanced Concurrent Mode, and it should +epprd_rg:process_resources[get_disks:1205] : not be necessary to break reserves. If all the volume groups +epprd_rg:process_resources[get_disks:1206] : are ECM, we should be able to skip breaking reserves. If it +epprd_rg:process_resources[get_disks:1207] : turns out that there is a reserve on a disk in an ECM volume +epprd_rg:process_resources[get_disks:1208] : group, that will be handled by cl_pvo making an explicit call +epprd_rg:process_resources[get_disks:1209] : to cl_disk_available. 
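The next lines implement that reasoning as a simple scan of the ODM: if every volume group in the job is Enhanced Concurrent Mode, no reserve-breaking pass is needed. In outline (names and the clodmget query taken from the trace; loop structure simplified):

    all_ecm=TRUE
    for vg in $(print $VG_LIST | tr ',' '\n' | sort -u); do
        # conc_capable=y in CuAt marks an Enhanced Concurrent Mode VG
        conc=$(clodmget -q "name = $vg and attribute = conc_capable" -f value -n CuAt)
        [[ $conc != y ]] && all_ecm=FALSE
    done
    [[ $all_ecm == TRUE ]] && return 0    # all ECM: skip breaking reserves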
+epprd_rg:process_resources[get_disks:1213] all_ecm=TRUE +epprd_rg:process_resources[get_disks:1214] IFS=: +epprd_rg:process_resources[get_disks:1214] set -- datavg +epprd_rg:process_resources[get_disks:1214] print datavg +epprd_rg:process_resources[get_disks:1216] print datavg +epprd_rg:process_resources[get_disks:1216] sort -u +epprd_rg:process_resources[get_disks:1216] tr , '\n' +epprd_rg:process_resources[get_disks:1218] clodmget -q 'name = datavg and attribute = conc_capable' -f value -n CuAt +epprd_rg:process_resources[get_disks:1218] [[ y != y ]] +epprd_rg:process_resources[get_disks:1224] [[ TRUE == FALSE ]] +epprd_rg:process_resources[get_disks:1226] [[ TRUE == TRUE ]] +epprd_rg:process_resources[get_disks:1226] return 0 +epprd_rg:process_resources[get_disks_main:1183] STAT=0 +epprd_rg:process_resources[get_disks_main:1186] return 0 +epprd_rg:process_resources[3443] tr ' ' '\n' +epprd_rg:process_resources[3443] echo +epprd_rg:process_resources[3443] FAILED_RR_RGS='' +epprd_rg:process_resources[3444] [[ -n '' ]] +epprd_rg:process_resources[3450] clodmget -n -q policy=scsi -f value HACMPsplitmerge +epprd_rg:process_resources[3450] SCSIPR_ENABLED='' +epprd_rg:process_resources[3450] typeset SCSIPR_ENABLED +epprd_rg:process_resources[3451] [[ '' == Yes ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:03:39.805587 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=VGS ACTION=ACQUIRE CONCURRENT_VOLUME_GROUP='""' VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' EXPORT_FILESYSTEM='""' +epprd_rg:process_resources[1] JOB_TYPE=VGS +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] CONCURRENT_VOLUME_GROUP='' +epprd_rg:process_resources[1] VOLUME_GROUPS=datavg +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] EXPORT_FILESYSTEM='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ VGS == RELEASE ]] +epprd_rg:process_resources[3360] [[ VGS == ONLINE ]] +epprd_rg:process_resources[3571] process_volume_groups_main ACQUIRE +epprd_rg:process_resources[process_volume_groups_main:2293] PS4_FUNC=process_volume_groups_main +epprd_rg:process_resources[process_volume_groups_main:2293] typeset PS4_FUNC +epprd_rg:process_resources[process_volume_groups_main:2294] [[ high == high ]] +epprd_rg:process_resources[process_volume_groups_main:2294] set -x +epprd_rg:process_resources[process_volume_groups_main:2295] DEF_VARYON_ACTION=0 +epprd_rg:process_resources[process_volume_groups_main:2295] typeset -li DEF_VARYON_ACTION +epprd_rg:process_resources[process_volume_groups_main:2296] FAILURE_IN_METHOD=0 +epprd_rg:process_resources[process_volume_groups_main:2296] typeset -li FAILURE_IN_METHOD +epprd_rg:process_resources[process_volume_groups_main:2297] 
ACTION=ACQUIRE +epprd_rg:process_resources[process_volume_groups_main:2297] typeset ACTION +epprd_rg:process_resources[process_volume_groups_main:2298] STAT=0 +epprd_rg:process_resources[process_volume_groups_main:2299] VG_LIST=datavg +epprd_rg:process_resources[process_volume_groups_main:2300] RG_LIST=epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2304] getReplicatedResources epprd_rg +epprd_rg:process_resources[getReplicatedResources:699] PS4_FUNC=getReplicatedResources +epprd_rg:process_resources[getReplicatedResources:699] typeset PS4_FUNC +epprd_rg:process_resources[getReplicatedResources:700] [[ high == high ]] +epprd_rg:process_resources[getReplicatedResources:700] set -x +epprd_rg:process_resources[getReplicatedResources:702] RV=false +epprd_rg:process_resources[getReplicatedResources:704] clodmget -n -f type HACMPrresmethods +epprd_rg:process_resources[getReplicatedResources:704] [[ -n 9 ]] +epprd_rg:process_resources[getReplicatedResources:707] : Replicated resource methods are defined, check for resources +epprd_rg:process_resources[getReplicatedResources:709] clodmget -q $'name like \'*_REP_RESOURCE\' AND group=epprd_rg' -f value -n HACMPresource +epprd_rg:process_resources[getReplicatedResources:709] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:718] : Verify if any backup profiles are configured and trigger cbm utilities based on that +epprd_rg:process_resources[getReplicatedResources:720] clodmget -q name=BACKUP_ENABLED -f value HACMPresource +epprd_rg:process_resources[getReplicatedResources:720] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:739] echo false +epprd_rg:process_resources[process_volume_groups_main:2304] REPLICATED_RESOURCES=false +epprd_rg:process_resources[process_volume_groups_main:2305] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2306] print -- datavg +epprd_rg:process_resources[process_volume_groups_main:2306] read VOLUME_GROUPS VG_LIST +epprd_rg:process_resources[process_volume_groups_main:2306] IFS=: +epprd_rg:process_resources[process_volume_groups_main:2307] VOLUME_GROUPS=datavg +epprd_rg:process_resources[process_volume_groups_main:2310] : At this point, these variables contain information only for epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2312] export VOLUME_GROUPS +epprd_rg:process_resources[process_volume_groups_main:2313] export RESOURCE_GROUPS +epprd_rg:process_resources[process_volume_groups_main:2315] [[ false == true ]] +epprd_rg:process_resources[process_volume_groups_main:2555] process_volume_groups ACQUIRE +epprd_rg:process_resources[process_volume_groups:2571] PS4_FUNC=process_volume_groups +epprd_rg:process_resources[process_volume_groups:2571] typeset PS4_FUNC +epprd_rg:process_resources[process_volume_groups:2572] [[ high == high ]] +epprd_rg:process_resources[process_volume_groups:2572] set -x +epprd_rg:process_resources[process_volume_groups:2573] STAT=0 +epprd_rg:process_resources[process_volume_groups:2575] GROUPNAME=epprd_rg +epprd_rg:process_resources[process_volume_groups:2575] export GROUPNAME +epprd_rg:process_resources[process_volume_groups:2578] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[process_volume_groups:2581] : Varyon the VGs in the environment +epprd_rg:process_resources[process_volume_groups:2583] cl_activate_vgs -n +epprd_rg:cl_activate_vgs[213] [[ high == high ]] +epprd_rg:cl_activate_vgs[213] version=1.46 +epprd_rg:cl_activate_vgs[215] STATUS=0 +epprd_rg:cl_activate_vgs[215] typeset -li STATUS 
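cl_activate_vgs, whose trace follows, boils down to: find out which volume groups are already varied on, dedupe the VG list for this resource group, and run clvaryonvg for each VG that is not yet online. A sketch under those assumptions (simplified; the real script also records status in a temp file and invokes OEM methods when configured):

    VGSTATUS=$(lsvg -L -o)            # VGs already online, here: caavg_private rootvg
    for vg in $(echo $LIST_OF_VOLUME_GROUPS_FOR_RG | tr ',' '\n' | sort -u); do
        [[ " $VGSTATUS " == *" $vg "* ]] && continue    # skip VGs already varied on
        clvaryonvg -n $vg             # -n: do not sync stale partitions here
    done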
+epprd_rg:cl_activate_vgs[216] SYNCFLAG='' +epprd_rg:cl_activate_vgs[217] CLENV='' +epprd_rg:cl_activate_vgs[218] TMP_FILENAME=/tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs[219] USE_OEM_METHODS=false +epprd_rg:cl_activate_vgs[221] PROC_RES=false +epprd_rg:cl_activate_vgs[225] [[ VGS != 0 ]] +epprd_rg:cl_activate_vgs[225] [[ VGS != GROUP ]] +epprd_rg:cl_activate_vgs[226] PROC_RES=true +epprd_rg:cl_activate_vgs[232] [[ -n == -n ]] +epprd_rg:cl_activate_vgs[234] SYNCFLAG=-n +epprd_rg:cl_activate_vgs[235] shift +epprd_rg:cl_activate_vgs[240] (( 0 != 0 )) +epprd_rg:cl_activate_vgs[247] set -u +epprd_rg:cl_activate_vgs[250] rm -f /tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs[254] lsvg -L -o +epprd_rg:cl_activate_vgs[254] print caavg_private rootvg +epprd_rg:cl_activate_vgs[254] VGSTATUS='caavg_private rootvg' +epprd_rg:cl_activate_vgs[257] ALLVGS=All_volume_groups +epprd_rg:cl_activate_vgs[258] cl_RMupdate resource_acquiring All_volume_groups cl_activate_vgs 2023-01-28T18:03:39.877790 2023-01-28T18:03:39.882284 +epprd_rg:cl_activate_vgs[262] [[ true == false ]] +epprd_rg:cl_activate_vgs[285] LIST_OF_VOLUME_GROUPS_FOR_RG='' +epprd_rg:cl_activate_vgs[289] export GROUPNAME +epprd_rg:cl_activate_vgs[291] echo datavg +epprd_rg:cl_activate_vgs[291] read LIST_OF_VOLUME_GROUPS_FOR_RG VOLUME_GROUPS +epprd_rg:cl_activate_vgs[291] IFS=: +epprd_rg:cl_activate_vgs[294] echo datavg +epprd_rg:cl_activate_vgs[296] sort -u +epprd_rg:cl_activate_vgs[295] tr , '\n' +epprd_rg:cl_activate_vgs[294] LIST_OF_VOLUME_GROUPS_FOR_RG=datavg +epprd_rg:cl_activate_vgs[298] vgs_list datavg +epprd_rg:cl_activate_vgs[vgs_list:178] PS4_LOOP='' +epprd_rg:cl_activate_vgs[vgs_list:178] typeset PS4_LOOP +epprd_rg:cl_activate_vgs:datavg[vgs_list:182] PS4_LOOP=datavg +epprd_rg:cl_activate_vgs:datavg[vgs_list:186] [[ 'caavg_private rootvg' == @(?(*\ )datavg?(\ *)) ]] +epprd_rg:cl_activate_vgs:datavg[vgs_list:192] : call varyon for the volume group in Foreground +epprd_rg:cl_activate_vgs:datavg[vgs_list:194] vgs_chk datavg -n cl_activate_vgs +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:78] VG=datavg +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:78] typeset VG +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:79] SYNCFLAG=-n +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:79] typeset SYNCFLAG +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:80] PROGNAME=cl_activate_vgs +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:80] typeset PROGNAME +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:81] STATUS=0 +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:81] typeset -li STATUS +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:83] [[ -n '' ]] +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:100] amlog_trace '' 'Activating Volume Group|datavg' +epprd_rg:cl_activate_vgs(0.052):datavg[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_vgs(0.053):datavg[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_vgs(0.077):datavg[amlog_trace:319] cltime +epprd_rg:cl_activate_vgs(0.080):datavg[amlog_trace:319] DATE=2023-01-28T18:03:39.919068 +epprd_rg:cl_activate_vgs(0.080):datavg[amlog_trace:320] echo '|2023-01-28T18:03:39.919068|INFO: Activating Volume Group|datavg' +epprd_rg:cl_activate_vgs(0.080):datavg[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_vgs(0.080):datavg[vgs_chk:102] typeset -x ERRMSG +epprd_rg:cl_activate_vgs(0.080):datavg[vgs_chk:103] clvaryonvg -n datavg +epprd_rg:clvaryonvg(0.009):datavg[985] version=1.21.7.22 +epprd_rg:clvaryonvg(0.009):datavg[989] : Without this test, cause of 
failure due to non-root may not be obvious +epprd_rg:clvaryonvg(0.009):datavg[991] [[ -z '' ]] +epprd_rg:clvaryonvg(0.009):datavg[991] id -nu +epprd_rg:clvaryonvg(0.010):datavg[991] 2> /dev/null +epprd_rg:clvaryonvg(0.012):datavg[991] user_name=root +epprd_rg:clvaryonvg(0.012):datavg[994] : Check if RBAC is enabled +epprd_rg:clvaryonvg(0.012):datavg[996] is_rbac_enabled='' +epprd_rg:clvaryonvg(0.012):datavg[996] typeset is_rbac_enabled +epprd_rg:clvaryonvg(0.012):datavg[997] clodmget -nq group='LDAPClient and name=RBACConfig' -f value HACMPLDAP +epprd_rg:clvaryonvg(0.013):datavg[997] 2> /dev/null +epprd_rg:clvaryonvg(0.016):datavg[997] is_rbac_enabled='' +epprd_rg:clvaryonvg(0.016):datavg[999] role='' +epprd_rg:clvaryonvg(0.016):datavg[999] typeset role +epprd_rg:clvaryonvg(0.016):datavg[1000] [[ root != root ]] +epprd_rg:clvaryonvg(0.016):datavg[1009] LEAVEOFF=FALSE +epprd_rg:clvaryonvg(0.016):datavg[1010] FORCEON='' +epprd_rg:clvaryonvg(0.016):datavg[1011] FORCEUPD=FALSE +epprd_rg:clvaryonvg(0.016):datavg[1012] NOQUORUM=20 +epprd_rg:clvaryonvg(0.016):datavg[1013] MISSING_UPDATES=30 +epprd_rg:clvaryonvg(0.016):datavg[1014] DATA_DIVERGENCE=31 +epprd_rg:clvaryonvg(0.016):datavg[1015] ARGS='' +epprd_rg:clvaryonvg(0.016):datavg[1016] typeset -li varyonvg_rc +epprd_rg:clvaryonvg(0.016):datavg[1017] typeset -li MAXLVS +epprd_rg:clvaryonvg(0.016):datavg[1018] ENODEV=19 +epprd_rg:clvaryonvg(0.016):datavg[1018] typeset -li ENODEV +epprd_rg:clvaryonvg(0.016):datavg[1020] set -u +epprd_rg:clvaryonvg(0.016):datavg[1022] /bin/dspmsg -s 2 cspoc.cat 31 'usage: clvaryonvg [-F] [-f] [-n] [-p] [-s] [-o] \n' +epprd_rg:clvaryonvg(0.019):datavg[1022] USAGE='usage: clvaryonvg [-F] [-f] [-n] [-p] [-s] [-o] ' +epprd_rg:clvaryonvg(0.019):datavg[1023] (( 2 < 1 )) +epprd_rg:clvaryonvg(0.019):datavg[1029] : Parse the options +epprd_rg:clvaryonvg(0.019):datavg[1031] S_FLAG='' +epprd_rg:clvaryonvg(0.019):datavg[1032] P_FLAG='' +epprd_rg:clvaryonvg(0.019):datavg[1033] getopts :Ffnops option +epprd_rg:clvaryonvg(0.019):datavg[1038] : -n Always applied, retained for compatibility +epprd_rg:clvaryonvg(0.019):datavg[1033] getopts :Ffnops option +epprd_rg:clvaryonvg(0.019):datavg[1048] : Pick up the volume group name, which follows the options +epprd_rg:clvaryonvg(0.019):datavg[1050] shift 1 +epprd_rg:clvaryonvg(0.019):datavg[1051] VG=datavg +epprd_rg:clvaryonvg(0.019):datavg[1054] : Set up filenames we will be using +epprd_rg:clvaryonvg(0.019):datavg[1056] VGDIR=/usr/es/sbin/cluster/etc/vg/ +epprd_rg:clvaryonvg(0.019):datavg[1057] TSFILE=/usr/es/sbin/cluster/etc/vg/datavg.tstamp +epprd_rg:clvaryonvg(0.019):datavg[1058] DSFILE=/usr/es/sbin/cluster/etc/vg/datavg.desc +epprd_rg:clvaryonvg(0.019):datavg[1059] RPFILE=/usr/es/sbin/cluster/etc/vg/datavg.replay +epprd_rg:clvaryonvg(0.019):datavg[1060] permset=/usr/es/sbin/cluster/etc/vg/datavg.perms +epprd_rg:clvaryonvg(0.019):datavg[1061] failfile=/usr/es/sbin/cluster/etc/vg/datavg.fail +epprd_rg:clvaryonvg(0.019):datavg[1065] : Get some LVM information we are going to need in processing this +epprd_rg:clvaryonvg(0.019):datavg[1066] : volume group: +epprd_rg:clvaryonvg(0.019):datavg[1067] : - volume group identifier - vgid +epprd_rg:clvaryonvg(0.019):datavg[1068] : - list of disks +epprd_rg:clvaryonvg(0.019):datavg[1069] : - quorum indicator +epprd_rg:clvaryonvg(0.019):datavg[1070] : - timestamp if present +epprd_rg:clvaryonvg(0.019):datavg[1072] /usr/sbin/getlvodm -v datavg +epprd_rg:clvaryonvg(0.022):datavg[1072] VGID=00c44af100004b00000001851e9dc053 
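Before deciding how to vary the group on, clvaryonvg collects the LVM facts it needs: the VGID looked up above, plus, in the lines that follow, the member disks and the quorum setting. Condensed from the trace:

    VGID=$(/usr/sbin/getlvodm -v $VG)                     # VG identifier from the ODM
    pvlst=$(/usr/sbin/getlvodm -w $VGID | cut -d' ' -f2)  # hdisks backing the VG
    quorum=$(/usr/sbin/getlvodm -Q $VG)                   # quorum setting, y or n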
+epprd_rg:clvaryonvg(0.024):datavg[1073] cut '-d ' -f2 +epprd_rg:clvaryonvg(0.023):datavg[1073] /usr/sbin/getlvodm -w 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(0.027):datavg[1073] pvlst=$'hdisk2\nhdisk3\nhdisk4\nhdisk5\nhdisk6\nhdisk7\nhdisk8' +epprd_rg:clvaryonvg(0.027):datavg[1074] /usr/sbin/getlvodm -Q datavg +epprd_rg:clvaryonvg(0.030):datavg[1074] quorum=y +epprd_rg:clvaryonvg(0.030):datavg[1075] TS_FROM_DISK='' +epprd_rg:clvaryonvg(0.030):datavg[1076] TS_FROM_ODM='' +epprd_rg:clvaryonvg(0.030):datavg[1077] GOOD_PV='' +epprd_rg:clvaryonvg(0.030):datavg[1078] O_flag='' +epprd_rg:clvaryonvg(0.030):datavg[1079] A_flag='' +epprd_rg:clvaryonvg(0.030):datavg[1080] mode_flag='' +epprd_rg:clvaryonvg(0.030):datavg[1081] vg_on_mode='' +epprd_rg:clvaryonvg(0.030):datavg[1082] vg_set_passive=FALSE +epprd_rg:clvaryonvg(0.030):datavg[1084] odmget -q 'attribute = varyon_state' PdAt +epprd_rg:clvaryonvg(0.033):datavg[1084] [[ -n $'\nPdAt:\n\tuniquetype = "logical_volume/vgsubclass/vgtype"\n\tattribute = "varyon_state"\n\tdeflt = "0"\n\tvalues = "0,1,2,3"\n\twidth = ""\n\ttype = "R"\n\tgeneric = ""\n\trep = "l"\n\tnls_index = 0' ]] +epprd_rg:clvaryonvg(0.033):datavg[1087] : LVM may record that a volume group was varied on from an earlier +epprd_rg:clvaryonvg(0.033):datavg[1088] : IPL. Rely on HA state tracking, and override the LVM check +epprd_rg:clvaryonvg(0.033):datavg[1090] O_flag=-O +epprd_rg:clvaryonvg(0.033):datavg[1093] : Checking if SCSI PR is enabled and it is so, +epprd_rg:clvaryonvg(0.033):datavg[1094] : confirming if the SCSI PR reservations are intact. +epprd_rg:clvaryonvg(0.034):datavg[1096] lssrc -ls clstrmgrES +epprd_rg:clvaryonvg(0.035):datavg[1096] 2>& 1 +epprd_rg:clvaryonvg(0.035):datavg[1096] egrep -q -v 'ST_INIT|NOT_CONFIGURED' +epprd_rg:clvaryonvg(0.035):datavg[1096] grep 'Current state:' +epprd_rg:clvaryonvg(0.050):datavg[1098] clodmget -n -q policy=scsi -f value HACMPsplitmerge +epprd_rg:clvaryonvg(0.053):datavg[1098] SCSIPR_ENABLED='' +epprd_rg:clvaryonvg(0.053):datavg[1098] typeset SCSIPR_ENABLED +epprd_rg:clvaryonvg(0.053):datavg[1099] clodmget -q $'name like \'*VOLUME_GROUP\' and value = datavg' -f group -n HACMPresource +epprd_rg:clvaryonvg(0.056):datavg[1099] resgrp=epprd_rg +epprd_rg:clvaryonvg(0.056):datavg[1099] typeset resgrp +epprd_rg:clvaryonvg(0.056):datavg[1100] [[ '' == Yes ]] +epprd_rg:clvaryonvg(0.056):datavg[1134] : Operations such as varying on the volume group are likely to +epprd_rg:clvaryonvg(0.056):datavg[1135] : require read/write access. So, set any volume group fencing appropriately. +epprd_rg:clvaryonvg(0.056):datavg[1137] cl_set_vg_fence_height -c datavg rw +epprd_rg:clvaryonvg(0.060):datavg[1138] RC=0 +epprd_rg:clvaryonvg(0.060):datavg[1139] (( 19 == 0 )) +epprd_rg:clvaryonvg(0.060):datavg[1147] : Return code from volume group fencing for datavg is 0 +epprd_rg:clvaryonvg(0.060):datavg[1148] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.060):datavg[1160] : Check on the current state of the volume group +epprd_rg:clvaryonvg(0.061):datavg[1182] grep -x -q datavg +epprd_rg:clvaryonvg(0.061):datavg[1182] lsvg -L +epprd_rg:clvaryonvg(0.065):datavg[1184] : The volume group is known - check to see if its already varyd on. 
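With the fence height set to read/write, clvaryonvg next classifies the current state of the volume group, as the following lines trace: is it known to this node, is it already varied on, and if so in which ECM mode. Roughly:

    if lsvg -L | grep -x -q $VG; then                     # VG known locally
        lsvg -L -o | grep -x -q $VG && : already online in some mode
        if lsvg -L $VG 2>/dev/null | grep -q -i -w passive-only; then
            vg_on_mode=passive        # ECM passive varyon left from node startup
        fi
    fi

Here the group is found in passive mode, the expected resting state for an Enhanced Concurrent Mode VG on a node that has not yet acquired the resource group.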
+epprd_rg:clvaryonvg(0.066):datavg[1186] grep -x -q datavg +epprd_rg:clvaryonvg(0.066):datavg[1186] lsvg -L -o +epprd_rg:clvaryonvg(0.069):datavg[1190] lsvg -L datavg +epprd_rg:clvaryonvg(0.070):datavg[1190] 2> /dev/null +epprd_rg:clvaryonvg(0.069):datavg[1190] grep -q -i -w passive-only +epprd_rg:clvaryonvg(0.112):datavg[1191] vg_on_mode=passive +epprd_rg:clvaryonvg(0.114):datavg[1194] grep -iw removed +epprd_rg:clvaryonvg(0.114):datavg[1194] lsvg -p datavg +epprd_rg:clvaryonvg(0.114):datavg[1194] 2> /dev/null +epprd_rg:clvaryonvg(0.134):datavg[1194] removed_disks='' +epprd_rg:clvaryonvg(0.134):datavg[1195] [[ -n '' ]] +epprd_rg:clvaryonvg(0.134):datavg[1213] [[ -n passive ]] +epprd_rg:clvaryonvg(0.134):datavg[1215] lqueryvg -g 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(0.135):datavg[1215] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.154):datavg[1321] : +epprd_rg:clvaryonvg(0.154):datavg[1322] : First, sniff at the disk to see if the local ODM information +epprd_rg:clvaryonvg(0.154):datavg[1323] : matches what is on the disk. +epprd_rg:clvaryonvg(0.154):datavg[1324] : +epprd_rg:clvaryonvg(0.154):datavg[1326] vgdatimestamps +epprd_rg:clvaryonvg(0.154):datavg[vgdatimestamps:201] PS4_FUNC=vgdatimestamps +epprd_rg:clvaryonvg(0.154):datavg[vgdatimestamps:201] typeset PS4_FUNC +epprd_rg:clvaryonvg(0.154):datavg[vgdatimestamps:202] [[ high == high ]] +epprd_rg:clvaryonvg(0.155):datavg[vgdatimestamps:202] set -x +epprd_rg:clvaryonvg(0.155):datavg[vgdatimestamps:203] set -u +epprd_rg:clvaryonvg(0.155):datavg[vgdatimestamps:206] : See what timestamp LVM has recorded from the last time it checked +epprd_rg:clvaryonvg(0.155):datavg[vgdatimestamps:207] : the disks +epprd_rg:clvaryonvg(0.155):datavg[vgdatimestamps:209] /usr/sbin/getlvodm -T 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(0.155):datavg[vgdatimestamps:209] 2> /dev/null +epprd_rg:clvaryonvg(0.158):datavg[vgdatimestamps:209] TS_FROM_ODM=63d4e41f29287594 +epprd_rg:clvaryonvg(0.158):datavg[vgdatimestamps:212] : Check to see if HACMP is maintaining a timestamp for this volume group +epprd_rg:clvaryonvg(0.158):datavg[vgdatimestamps:213] : Needed for some older volume groups +epprd_rg:clvaryonvg(0.158):datavg[vgdatimestamps:215] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.tstamp ]] +epprd_rg:clvaryonvg(0.158):datavg[vgdatimestamps:234] : Get the time stamp from the actual disk +epprd_rg:clvaryonvg(0.158):datavg[vgdatimestamps:236] clvgdats /dev/datavg +epprd_rg:clvaryonvg(0.159):datavg[vgdatimestamps:236] 2> /dev/null +epprd_rg:clvaryonvg(0.168):datavg[vgdatimestamps:236] TS_FROM_DISK=63d4e41f29287594 +epprd_rg:clvaryonvg(0.169):datavg[vgdatimestamps:237] clvgdats_rc=0 +epprd_rg:clvaryonvg(0.169):datavg[vgdatimestamps:238] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.169):datavg[vgdatimestamps:247] [[ -z 63d4e41f29287594 ]] +epprd_rg:clvaryonvg(0.169):datavg[1328] [[ 63d4e41f29287594 != 63d4e41f29287594 ]] +epprd_rg:clvaryonvg(0.169):datavg[1344] : There is a chance that a VG that should be in passive mode is not. +epprd_rg:clvaryonvg(0.169):datavg[1345] : Run cl_pvo to put it in passive mode if possible. +epprd_rg:clvaryonvg(0.169):datavg[1350] [[ -z passive ]] +epprd_rg:clvaryonvg(0.169):datavg[1350] [[ passive == ordinary ]] +epprd_rg:clvaryonvg(0.169):datavg[1350] [[ passive == passive ]] +epprd_rg:clvaryonvg(0.169):datavg[1350] [[ -n '' ]] +epprd_rg:clvaryonvg(0.169):datavg[1381] : Let us assume that the old style synclvodm would sync all the PV/FS changes. 
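The vgdatimestamps calls above are a cheap consistency probe: LVM caches in the ODM the VGDA timestamp it last saw, while clvgdats reads the timestamp actually on disk; a mismatch means another node has changed the volume group and the local definitions must be resynced before they can be trusted. In outline:

    TS_FROM_ODM=$(/usr/sbin/getlvodm -T $VGID 2>/dev/null)  # cached by LVM in the ODM
    TS_FROM_DISK=$(clvgdats /dev/$VG 2>/dev/null)           # read from the VGDA itself
    if [[ $TS_FROM_ODM != $TS_FROM_DISK ]]; then
        : # ODM is stale: resync definitions before relying on local data
    fi

In this run both sides read 63d4e41f29287594, so no resync is needed.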
+epprd_rg:clvaryonvg(0.169):datavg[1383] expimpvg_notrequired=1 +epprd_rg:clvaryonvg(0.169):datavg[1386] : Optimistically give varyonvg a try. +epprd_rg:clvaryonvg(0.169):datavg[1388] [[ passive == passive ]] +epprd_rg:clvaryonvg(0.169):datavg[1391] : If the volume group was varyd on in passive mode when this node came +epprd_rg:clvaryonvg(0.169):datavg[1392] : up, flip it over to active mode. Following logic will then fall +epprd_rg:clvaryonvg(0.169):datavg[1393] : through to updatefs. +epprd_rg:clvaryonvg(0.169):datavg[1395] [[ passive == passive ]] +epprd_rg:clvaryonvg(0.169):datavg[1395] A_flag=-A +epprd_rg:clvaryonvg(0.169):datavg[1396] varyonvg -n -c -A -O datavg +epprd_rg:clvaryonvg(0.170):datavg[1396] 2>& 1 +epprd_rg:clvaryonvg(0.396):datavg[1396] varyonvg_output='' +epprd_rg:clvaryonvg(0.397):datavg[1397] varyonvg_rc=0 +epprd_rg:clvaryonvg(0.397):datavg[1397] typeset -li varyonvg_rc +epprd_rg:clvaryonvg(0.397):datavg[1399] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.397):datavg[1481] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.397):datavg[1576] : At this point, datavg should be varied on +epprd_rg:clvaryonvg(0.397):datavg[1578] [[ FALSE == TRUE ]] +epprd_rg:clvaryonvg(0.397):datavg[1585] [[ -z 63d4e41f29287594 ]] +epprd_rg:clvaryonvg(0.397):datavg[1592] vgdatimestamps +epprd_rg:clvaryonvg(0.397):datavg[vgdatimestamps:201] PS4_FUNC=vgdatimestamps +epprd_rg:clvaryonvg(0.397):datavg[vgdatimestamps:201] typeset PS4_FUNC +epprd_rg:clvaryonvg(0.397):datavg[vgdatimestamps:202] [[ high == high ]] +epprd_rg:clvaryonvg(0.397):datavg[vgdatimestamps:202] set -x +epprd_rg:clvaryonvg(0.397):datavg[vgdatimestamps:203] set -u +epprd_rg:clvaryonvg(0.397):datavg[vgdatimestamps:206] : See what timestamp LVM has recorded from the last time it checked +epprd_rg:clvaryonvg(0.397):datavg[vgdatimestamps:207] : the disks +epprd_rg:clvaryonvg(0.397):datavg[vgdatimestamps:209] /usr/sbin/getlvodm -T 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(0.398):datavg[vgdatimestamps:209] 2> /dev/null +epprd_rg:clvaryonvg(0.400):datavg[vgdatimestamps:209] TS_FROM_ODM=63d4e4ec07aab272 +epprd_rg:clvaryonvg(0.400):datavg[vgdatimestamps:212] : Check to see if HACMP is maintaining a timestamp for this volume group +epprd_rg:clvaryonvg(0.400):datavg[vgdatimestamps:213] : Needed for some older volume groups +epprd_rg:clvaryonvg(0.400):datavg[vgdatimestamps:215] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.tstamp ]] +epprd_rg:clvaryonvg(0.401):datavg[vgdatimestamps:234] : Get the time stamp from the actual disk +epprd_rg:clvaryonvg(0.401):datavg[vgdatimestamps:236] clvgdats /dev/datavg +epprd_rg:clvaryonvg(0.401):datavg[vgdatimestamps:236] 2> /dev/null +epprd_rg:clvaryonvg(0.411):datavg[vgdatimestamps:236] TS_FROM_DISK=63d4e4ec07aab272 +epprd_rg:clvaryonvg(0.411):datavg[vgdatimestamps:237] clvgdats_rc=0 +epprd_rg:clvaryonvg(0.411):datavg[vgdatimestamps:238] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.411):datavg[vgdatimestamps:247] [[ -z 63d4e4ec07aab272 ]] +epprd_rg:clvaryonvg(0.411):datavg[1600] [[ 63d4e4ec07aab272 != 63d4e4ec07aab272 ]] +epprd_rg:clvaryonvg(0.411):datavg[1622] [[ FALSE == TRUE ]] +epprd_rg:clvaryonvg(0.411):datavg[1633] : Even if everything looks OK, update the local file system +epprd_rg:clvaryonvg(0.411):datavg[1634] : definitions, since changes there do not show up in the +epprd_rg:clvaryonvg(0.411):datavg[1635] : VGDA timestamps +epprd_rg:clvaryonvg(0.411):datavg[1637] updatefs datavg +epprd_rg:clvaryonvg(0.411):datavg[updatefs:506] PS4_FUNC=updatefs +epprd_rg:clvaryonvg(0.411):datavg[updatefs:506] typeset PS4_FUNC 
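The varyonvg call above is the passive-to-active flip itself. Reading the flags against this script's own commentary (meanings inferred from the trace, not from independent documentation):

    # -n  do not sync stale partitions during varyon
    # -c  concurrent (Enhanced Concurrent Mode) varyon
    # -A  flip the already-passive VG into active mode
    # -O  override LVM's recorded varyon state from an earlier IPL
    varyonvg -n -c -A -O datavg

It returns 0, and a second vgdatimestamps pass confirms the ODM and on-disk VGDA timestamps still agree (now 63d4e4ec07aab272), so control falls through to updatefs.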
+epprd_rg:clvaryonvg(0.411):datavg[updatefs:507] [[ high == high ]] +epprd_rg:clvaryonvg(0.411):datavg[updatefs:507] set -x +epprd_rg:clvaryonvg(0.411):datavg[updatefs:508] do_imfs='' +epprd_rg:clvaryonvg(0.411):datavg[updatefs:508] typeset do_imfs +epprd_rg:clvaryonvg(0.411):datavg[updatefs:509] has_typed_lvs='' +epprd_rg:clvaryonvg(0.411):datavg[updatefs:509] typeset has_typed_lvs +epprd_rg:clvaryonvg(0.411):datavg[updatefs:512] : Delete existing filesystem information for this volume group. This is +epprd_rg:clvaryonvg(0.411):datavg[updatefs:513] : needed because imfs will not update an existing /etc/filesystems entry. +epprd_rg:clvaryonvg(0.413):datavg[updatefs:515] cut -f1 '-d ' +epprd_rg:clvaryonvg(0.413):datavg[updatefs:515] /usr/sbin/getlvodm -L datavg +epprd_rg:clvaryonvg(0.417):datavg[updatefs:515] lv_list=$'saplv\nsapmntlv\noraclelv\nepplv\noraarchlv\nsapdata1lv\nsapdata2lv\nsapdata3lv\nsapdata4lv\nboardlv\noriglogAlv\noriglogBlv\nmirrlogAlv\nmirrlogBlv\nepprdaloglv' +epprd_rg:clvaryonvg(0.417):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.417):datavg[updatefs:521] clodmget -q 'name = saplv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.420):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.420):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.420):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.420):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(0.420):datavg[updatefs:530] /usr/sbin/getlvcb -f saplv +epprd_rg:clvaryonvg(0.421):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.439):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.439):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.439):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.441):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.441):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.444):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.445):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.445):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.445):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.446):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.465):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.465):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.465):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.465):datavg[updatefs:538] : 3. Its logs LVCB is readable
+epprd_rg:clvaryonvg(0.466):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.466):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.469):datavg[updatefs:545] /usr/sbin/imfs -lx saplv +epprd_rg:clvaryonvg(0.473):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.473):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.473):datavg[updatefs:521] clodmget -q 'name = sapmntlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.477):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.477):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.477):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.477):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(0.477):datavg[updatefs:530] /usr/sbin/getlvcb -f sapmntlv +epprd_rg:clvaryonvg(0.478):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.495):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.495):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.495):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.497):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.497):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.500):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.500):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.500):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.500):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.502):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.520):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.520):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.520):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.520):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.522):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.521):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.525):datavg[updatefs:545] /usr/sbin/imfs -lx sapmntlv +epprd_rg:clvaryonvg(0.529):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.529):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.529):datavg[updatefs:521] clodmget -q 'name = oraclelv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.532):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.532):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.532):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.532):datavg[updatefs:528] : information to reconstruct it.
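From here updatefs walks every logical volume returned by getlvodm -L datavg and removes its /etc/filesystems stanza with imfs -lx, but only when the stanza can be rebuilt afterwards: the LV must not be raw (CuAt type attribute), getlvcb -f must return usable vfs information, and the jfs2 log device named in that information must itself have a readable LVCB. The deletion is unavoidable because imfs cannot update an existing stanza in place. The same block repeats below for each LV in lv_list. A condensed ksh sketch of one iteration, using the names from the trace (the typed-LV special case is omitted for brevity):

    function drop_fs_stanza      # $1 = logical volume name
    {
        typeset lv=$1
        # Raw LVs carry no filesystem, so there is nothing to remove.
        [[ -n $(clodmget -q "name = $lv and attribute = type and value = raw" -f value -n CuAt) ]] && return 0
        typeset fs_info=$(LC_ALL=C /usr/sbin/getlvcb -f $lv)
        [[ $fs_info == *([[:space:]]) ]] && return 0          # no LVCB fs info: not a filesystem
        typeset log_lv=$(echo $fs_info | sed -n 's/.*log=\([^:]*\).*/\1/p')
        if [[ -n $log_lv && $log_lv != INLINE ]]
        then
            # The log device's LVCB must be readable, or the stanza could not be rebuilt.
            /usr/sbin/getlvcb -t ${log_lv##*/} >/dev/null 2>&1 || return 0
        fi
        /usr/sbin/imfs -lx $lv                                # drop the stanza
        do_imfs=true                                          # remember to re-import later
    }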
+epprd_rg:clvaryonvg(0.532):datavg[updatefs:530] /usr/sbin/getlvcb -f oraclelv +epprd_rg:clvaryonvg(0.533):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.551):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.551):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.551):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.553):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.553):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.556):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.556):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.556):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.556):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.558):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.578):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.578):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.578):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.578):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.579):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.579):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.582):datavg[updatefs:545] /usr/sbin/imfs -lx oraclelv +epprd_rg:clvaryonvg(0.587):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.587):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.587):datavg[updatefs:521] clodmget -q 'name = epplv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.590):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.590):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.590):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.590):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.590):datavg[updatefs:530] /usr/sbin/getlvcb -f epplv +epprd_rg:clvaryonvg(0.591):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.610):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.610):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.610):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.612):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.612):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.615):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.616):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.616):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.616):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.617):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.636):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.636):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.636):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.636):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.637):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.637):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.640):datavg[updatefs:545] /usr/sbin/imfs -lx epplv +epprd_rg:clvaryonvg(0.645):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.645):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.645):datavg[updatefs:521] clodmget -q 'name = oraarchlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.648):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.648):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.648):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.648):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.648):datavg[updatefs:530] /usr/sbin/getlvcb -f oraarchlv +epprd_rg:clvaryonvg(0.649):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.667):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.667):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.667):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.669):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.669):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.672):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.672):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.672):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.672):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.674):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.692):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.692):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.692):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.692):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.693):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.693):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.696):datavg[updatefs:545] /usr/sbin/imfs -lx oraarchlv +epprd_rg:clvaryonvg(0.701):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.701):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.701):datavg[updatefs:521] clodmget -q 'name = sapdata1lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.704):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.704):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.704):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.704):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.704):datavg[updatefs:530] /usr/sbin/getlvcb -f sapdata1lv +epprd_rg:clvaryonvg(0.705):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.722):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.722):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.722):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.724):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.724):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.728):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.728):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.728):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.728):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.729):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.748):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.748):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.748):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.748):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.749):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.749):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.752):datavg[updatefs:545] /usr/sbin/imfs -lx sapdata1lv +epprd_rg:clvaryonvg(0.756):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.756):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.756):datavg[updatefs:521] clodmget -q 'name = sapdata2lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.760):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.760):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.760):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.760):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.760):datavg[updatefs:530] /usr/sbin/getlvcb -f sapdata2lv +epprd_rg:clvaryonvg(0.761):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.778):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.778):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.778):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.780):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.780):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.784):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.784):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.784):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.784):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.785):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.804):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.804):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.804):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.804):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.805):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.805):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.808):datavg[updatefs:545] /usr/sbin/imfs -lx sapdata2lv +epprd_rg:clvaryonvg(0.812):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.812):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.812):datavg[updatefs:521] clodmget -q 'name = sapdata3lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.816):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.816):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.816):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.816):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.816):datavg[updatefs:530] /usr/sbin/getlvcb -f sapdata3lv +epprd_rg:clvaryonvg(0.817):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.835):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.835):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.835):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.837):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.837):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.840):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.840):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.840):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.840):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.842):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.861):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.861):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.861):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.861):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.862):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.862):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.865):datavg[updatefs:545] /usr/sbin/imfs -lx sapdata3lv +epprd_rg:clvaryonvg(0.870):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.870):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.870):datavg[updatefs:521] clodmget -q 'name = sapdata4lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.873):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.873):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.873):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.873):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.873):datavg[updatefs:530] /usr/sbin/getlvcb -f sapdata4lv +epprd_rg:clvaryonvg(0.874):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.892):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.892):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.892):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.893):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.894):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.897):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.897):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.897):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.897):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.898):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.917):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.917):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.917):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.917):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.918):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.918):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.921):datavg[updatefs:545] /usr/sbin/imfs -lx sapdata4lv +epprd_rg:clvaryonvg(0.925):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.925):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.925):datavg[updatefs:521] clodmget -q 'name = boardlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.929):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.929):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.929):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.929):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.929):datavg[updatefs:530] /usr/sbin/getlvcb -f boardlv +epprd_rg:clvaryonvg(0.930):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.947):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.947):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.947):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.949):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.949):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.952):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.952):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.952):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.953):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.954):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.973):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.973):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.973):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.973):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.974):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.974):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.977):datavg[updatefs:545] /usr/sbin/imfs -lx boardlv +epprd_rg:clvaryonvg(0.981):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.981):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.981):datavg[updatefs:521] clodmget -q 'name = origlogAlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.985):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.985):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.985):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.985):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.985):datavg[updatefs:530] /usr/sbin/getlvcb -f origlogAlv +epprd_rg:clvaryonvg(0.986):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.004):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.004):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.004):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.006):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.006):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.009):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.009):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.009):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.009):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.010):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.029):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.029):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.029):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.029):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.030):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.030):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.033):datavg[updatefs:545] /usr/sbin/imfs -lx origlogAlv +epprd_rg:clvaryonvg(1.037):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.037):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.037):datavg[updatefs:521] clodmget -q 'name = origlogBlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.041):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.041):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.041):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.041):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(1.041):datavg[updatefs:530] /usr/sbin/getlvcb -f origlogBlv +epprd_rg:clvaryonvg(1.042):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.059):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.060):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.060):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.061):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.061):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.065):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.065):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.065):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.065):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.066):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.085):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.085):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.085):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.085):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.086):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.086):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.089):datavg[updatefs:545] /usr/sbin/imfs -lx origlogBlv +epprd_rg:clvaryonvg(1.093):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.093):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.093):datavg[updatefs:521] clodmget -q 'name = mirrlogAlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.097):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.097):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.097):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.097):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(1.097):datavg[updatefs:530] /usr/sbin/getlvcb -f mirrlogAlv +epprd_rg:clvaryonvg(1.098):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.116):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.116):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.116):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.117):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.117):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.121):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.121):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.121):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.121):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.122):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.141):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.141):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.141):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.141):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.142):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.142):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.145):datavg[updatefs:545] /usr/sbin/imfs -lx mirrlogAlv +epprd_rg:clvaryonvg(1.149):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.149):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.149):datavg[updatefs:521] clodmget -q 'name = mirrlogBlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.153):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.153):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.153):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.153):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(1.153):datavg[updatefs:530] /usr/sbin/getlvcb -f mirrlogBlv +epprd_rg:clvaryonvg(1.154):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.171):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.171):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.171):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.173):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.173):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.176):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.176):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.176):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.176):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.178):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.196):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.196):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.196):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.196):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.198):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.198):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.201):datavg[updatefs:545] /usr/sbin/imfs -lx mirrlogBlv +epprd_rg:clvaryonvg(1.205):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.205):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.205):datavg[updatefs:521] clodmget -q 'name = epprdaloglv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.208):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.208):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.208):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.208):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(1.208):datavg[updatefs:530] /usr/sbin/getlvcb -f epprdaloglv +epprd_rg:clvaryonvg(1.209):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.227):datavg[updatefs:530] fs_info=' ' +epprd_rg:clvaryonvg(1.227):datavg[updatefs:531] [[ -n ' ' ]] +epprd_rg:clvaryonvg(1.227):datavg[updatefs:531] [[ ' ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.227):datavg[updatefs:552] [[ -n true ]] +epprd_rg:clvaryonvg(1.227):datavg[updatefs:556] : Pick up any file system changes that may have happened when +epprd_rg:clvaryonvg(1.227):datavg[updatefs:557] : the volume group was owned by another node. That is, if a +epprd_rg:clvaryonvg(1.227):datavg[updatefs:558] : local change was made - not through C-SPOC, we would have no +epprd_rg:clvaryonvg(1.227):datavg[updatefs:559] : indication it happened.
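epprdaloglv, the jfs2log volume for the group, is the one LV that takes the skip path: getlvcb -f returns only blanks, so the *([[:space:]]) test fails and no stanza is touched (a log device has no mount point to lose). With do_imfs left set to true by the deletions above, updatefs moves on to rebuilding the stanzas, which is exactly what the comment about changes made while the volume group was owned by another node is warning about: a filesystem change made locally on the peer node, outside C-SPOC, would be invisible here until imfs re-reads the LVCBs. A sketch of the gate recorded at updatefs:552-563, assuming the do_imfs/has_typed_lvs semantics visible in the trace:

    if [[ -n $do_imfs ]]                 # at least one stanza was deleted
    then
        if [[ -z $has_typed_lvs ]]       # no typed LVs, so one pass covers the whole VG
        then
            /usr/sbin/imfs datavg        # recreate stanzas for every LV from its LVCB
        fi
    fi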
+epprd_rg:clvaryonvg(1.227):datavg[updatefs:561] [[ -z '' ]] +epprd_rg:clvaryonvg(1.227):datavg[updatefs:563] /usr/sbin/imfs datavg +epprd_rg:clvaryonvg(1.900):datavg[updatefs:589] : For a valid file system configuration, the mount point in +epprd_rg:clvaryonvg(1.900):datavg[updatefs:590] : /etc/filesystems for the logical volume should match the +epprd_rg:clvaryonvg(1.900):datavg[updatefs:591] : label of the logical volume. The above imfs should have +epprd_rg:clvaryonvg(1.900):datavg[updatefs:592] : matched those two. Now, check that they match the label +epprd_rg:clvaryonvg(1.900):datavg[updatefs:593] : for the logical volume as saved in ODM. +epprd_rg:clvaryonvg(1.900):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.900):datavg[updatefs:600] clodmget -q 'name = saplv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.904):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(1.904):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(1.904):datavg[updatefs:607] /usr/sbin/getlvcb -f saplv +epprd_rg:clvaryonvg(1.923):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.923):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(1.923):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(1.923):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(1.923):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(1.923):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.923):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.923):datavg[updatefs:623] : Label and file system type from LVCB on disk for saplv +epprd_rg:clvaryonvg(1.924):datavg[updatefs:625] getlvcb -T -A saplv +epprd_rg:clvaryonvg(1.924):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(1.927):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(1.930):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(1.932):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(1.945):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(1.945):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(1.945):datavg[updatefs:632] : Mount point in /etc/filesystems for saplv +epprd_rg:clvaryonvg(1.946):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/saplv$' /etc/filesystems +epprd_rg:clvaryonvg(1.949):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(1.949):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(1.953):datavg[updatefs:634] fs_mount_point=/usr/sap +epprd_rg:clvaryonvg(1.953):datavg[updatefs:637] : CuAt label attribute for saplv +epprd_rg:clvaryonvg(1.953):datavg[updatefs:639] clodmget -q 'name = saplv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(1.957):datavg[updatefs:639] CuAt_label=/usr/sap +epprd_rg:clvaryonvg(1.958):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(1.959):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(1.963):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(1.963):datavg[updatefs:657] [[ -z /usr/sap ]] +epprd_rg:clvaryonvg(1.963):datavg[updatefs:657] [[ /usr/sap == None ]] +epprd_rg:clvaryonvg(1.963):datavg[updatefs:665] [[ /usr/sap == /usr/sap ]] +epprd_rg:clvaryonvg(1.963):datavg[updatefs:665] [[ /usr/sap != /usr/sap ]] 
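Once imfs has rebuilt the stanzas, updatefs verifies each filesystem LV three ways: the label stored in the LVCB on disk (getlvcb -T -A), the mount point of whichever /etc/filesystems stanza names /dev/<lv> as its dev (the egrep -p paragraph match), and the label attribute cached in ODM CuAt. For saplv all three agree on /usr/sap, so every comparison falls through and nothing is rewritten; the same three-way check repeats below for the remaining LVs. A minimal ksh sketch of one check, assuming the output formats visible in the trace (the corrective actions taken on a mismatch differ by case and are reduced to a placeholder here):

    typeset lv=saplv
    # Label and vfs type recorded in the LVCB on disk.
    LC_ALL=C getlvcb -T -A $lv | egrep -w 'label =|type =' | paste -s - - |
        read skip skip lvcb_label skip skip lvcb_type rest
    # Mount point of the stanza that names this LV as its dev.
    typeset fs_mount_point=$(egrep -p "^([[:space:]])*dev([[:space:]])*= /dev/$lv\$" /etc/filesystems | head -1 | cut -f1 -d:)
    # Label cached in the ODM.
    typeset CuAt_label=$(clodmget -q "name = $lv and attribute = label" -f value -n CuAt)
    if [[ $fs_mount_point != $CuAt_label || $lvcb_label != $CuAt_label ]]
    then
        :   # out of sync: the script repairs the stanza and/or the saved label here
    fi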
+epprd_rg:clvaryonvg(1.963):datavg[updatefs:685] [[ /usr/sap != /usr/sap ]] +epprd_rg:clvaryonvg(1.963):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.963):datavg[updatefs:600] clodmget -q 'name = sapmntlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.966):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(1.966):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(1.966):datavg[updatefs:607] /usr/sbin/getlvcb -f sapmntlv +epprd_rg:clvaryonvg(1.983):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.983):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(1.983):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(1.983):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(1.983):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(1.983):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.984):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.984):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapmntlv +epprd_rg:clvaryonvg(1.984):datavg[updatefs:625] getlvcb -T -A sapmntlv +epprd_rg:clvaryonvg(1.985):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(1.988):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(1.991):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(1.993):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.005):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.005):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.005):datavg[updatefs:632] : Mount point in /etc/filesystems for sapmntlv +epprd_rg:clvaryonvg(2.007):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapmntlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.009):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.011):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.014):datavg[updatefs:634] fs_mount_point=/sapmnt +epprd_rg:clvaryonvg(2.014):datavg[updatefs:637] : CuAt label attribute for sapmntlv +epprd_rg:clvaryonvg(2.014):datavg[updatefs:639] clodmget -q 'name = sapmntlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.018):datavg[updatefs:639] CuAt_label=/sapmnt +epprd_rg:clvaryonvg(2.019):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.021):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.023):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.023):datavg[updatefs:657] [[ -z /sapmnt ]] +epprd_rg:clvaryonvg(2.023):datavg[updatefs:657] [[ /sapmnt == None ]] +epprd_rg:clvaryonvg(2.023):datavg[updatefs:665] [[ /sapmnt == /sapmnt ]] +epprd_rg:clvaryonvg(2.023):datavg[updatefs:665] [[ /sapmnt != /sapmnt ]] +epprd_rg:clvaryonvg(2.023):datavg[updatefs:685] [[ /sapmnt != /sapmnt ]] +epprd_rg:clvaryonvg(2.024):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.024):datavg[updatefs:600] clodmget -q 'name = oraclelv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.027):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.027):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.027):datavg[updatefs:607] /usr/sbin/getlvcb -f oraclelv 
+epprd_rg:clvaryonvg(2.044):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.044):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.044):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.044):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.044):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.044):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.044):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.044):datavg[updatefs:623] : Label and file system type from LVCB on disk for oraclelv +epprd_rg:clvaryonvg(2.045):datavg[updatefs:625] getlvcb -T -A oraclelv +epprd_rg:clvaryonvg(2.045):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.048):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.051):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.053):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.066):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.066):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.066):datavg[updatefs:632] : Mount point in /etc/filesystems for oraclelv +epprd_rg:clvaryonvg(2.068):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/oraclelv$' /etc/filesystems +epprd_rg:clvaryonvg(2.070):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.072):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.075):datavg[updatefs:634] fs_mount_point=/oracle +epprd_rg:clvaryonvg(2.075):datavg[updatefs:637] : CuAt label attribute for oraclelv +epprd_rg:clvaryonvg(2.075):datavg[updatefs:639] clodmget -q 'name = oraclelv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.078):datavg[updatefs:639] CuAt_label=/oracle +epprd_rg:clvaryonvg(2.080):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.081):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.084):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.084):datavg[updatefs:657] [[ -z /oracle ]] +epprd_rg:clvaryonvg(2.084):datavg[updatefs:657] [[ /oracle == None ]] +epprd_rg:clvaryonvg(2.084):datavg[updatefs:665] [[ /oracle == /oracle ]] +epprd_rg:clvaryonvg(2.084):datavg[updatefs:665] [[ /oracle != /oracle ]] +epprd_rg:clvaryonvg(2.084):datavg[updatefs:685] [[ /oracle != /oracle ]] +epprd_rg:clvaryonvg(2.084):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.084):datavg[updatefs:600] clodmget -q 'name = epplv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.087):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.087):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.087):datavg[updatefs:607] /usr/sbin/getlvcb -f epplv +epprd_rg:clvaryonvg(2.105):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.105):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.105):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.105):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.105):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.105):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.105):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]]
+epprd_rg:clvaryonvg(2.105):datavg[updatefs:623] : Label and file system type from LVCB on disk for epplv +epprd_rg:clvaryonvg(2.106):datavg[updatefs:625] getlvcb -T -A epplv +epprd_rg:clvaryonvg(2.107):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.110):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.113):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.114):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.128):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.128):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.128):datavg[updatefs:632] : Mount point in /etc/filesystems for epplv +epprd_rg:clvaryonvg(2.129):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/epplv$' /etc/filesystems +epprd_rg:clvaryonvg(2.132):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.133):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.137):datavg[updatefs:634] fs_mount_point=/oracle/EPP +epprd_rg:clvaryonvg(2.137):datavg[updatefs:637] : CuAt label attribute for epplv +epprd_rg:clvaryonvg(2.137):datavg[updatefs:639] clodmget -q 'name = epplv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.140):datavg[updatefs:639] CuAt_label=/oracle/EPP +epprd_rg:clvaryonvg(2.142):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.143):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.146):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.146):datavg[updatefs:657] [[ -z /oracle/EPP ]] +epprd_rg:clvaryonvg(2.146):datavg[updatefs:657] [[ /oracle/EPP == None ]] +epprd_rg:clvaryonvg(2.146):datavg[updatefs:665] [[ /oracle/EPP == /oracle/EPP ]] +epprd_rg:clvaryonvg(2.146):datavg[updatefs:665] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:clvaryonvg(2.146):datavg[updatefs:685] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:clvaryonvg(2.146):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.146):datavg[updatefs:600] clodmget -q 'name = oraarchlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.149):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.149):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.149):datavg[updatefs:607] /usr/sbin/getlvcb -f oraarchlv +epprd_rg:clvaryonvg(2.167):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.167):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.167):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.167):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.167):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.167):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.167):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.167):datavg[updatefs:623] : Label and file system type from LVCB on disk for oraarchlv +epprd_rg:clvaryonvg(2.168):datavg[updatefs:625] getlvcb -T -A oraarchlv +epprd_rg:clvaryonvg(2.168):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.171):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.174):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.176):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest
+epprd_rg:clvaryonvg(2.189):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.189):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.189):datavg[updatefs:632] : Mount point in /etc/filesystems for oraarchlv +epprd_rg:clvaryonvg(2.190):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/oraarchlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.193):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.194):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.198):datavg[updatefs:634] fs_mount_point=/oracle/EPP/oraarch +epprd_rg:clvaryonvg(2.198):datavg[updatefs:637] : CuAt label attribute for oraarchlv +epprd_rg:clvaryonvg(2.198):datavg[updatefs:639] clodmget -q 'name = oraarchlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.201):datavg[updatefs:639] CuAt_label=/oracle/EPP/oraarch +epprd_rg:clvaryonvg(2.202):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.204):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.207):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.207):datavg[updatefs:657] [[ -z /oracle/EPP/oraarch ]] +epprd_rg:clvaryonvg(2.207):datavg[updatefs:657] [[ /oracle/EPP/oraarch == None ]] +epprd_rg:clvaryonvg(2.207):datavg[updatefs:665] [[ /oracle/EPP/oraarch == /oracle/EPP/oraarch ]] +epprd_rg:clvaryonvg(2.207):datavg[updatefs:665] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:clvaryonvg(2.207):datavg[updatefs:685] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:clvaryonvg(2.207):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.207):datavg[updatefs:600] clodmget -q 'name = sapdata1lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.210):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.210):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.210):datavg[updatefs:607] /usr/sbin/getlvcb -f sapdata1lv +epprd_rg:clvaryonvg(2.228):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.228):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.228):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.228):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.228):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.228):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.228):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.228):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapdata1lv +epprd_rg:clvaryonvg(2.229):datavg[updatefs:625] getlvcb -T -A sapdata1lv +epprd_rg:clvaryonvg(2.229):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.232):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.235):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.237):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.250):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.250):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.250):datavg[updatefs:632] : Mount point in /etc/filesystems for sapdata1lv +epprd_rg:clvaryonvg(2.251):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapdata1lv$' /etc/filesystems +epprd_rg:clvaryonvg(2.254):datavg[updatefs:634] head -1
+epprd_rg:clvaryonvg(2.255):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.259):datavg[updatefs:634] fs_mount_point=/oracle/EPP/sapdata1 +epprd_rg:clvaryonvg(2.259):datavg[updatefs:637] : CuAt label attribute for sapdata1lv +epprd_rg:clvaryonvg(2.259):datavg[updatefs:639] clodmget -q 'name = sapdata1lv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.262):datavg[updatefs:639] CuAt_label=/oracle/EPP/sapdata1 +epprd_rg:clvaryonvg(2.267):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.268):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.271):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.271):datavg[updatefs:657] [[ -z /oracle/EPP/sapdata1 ]] +epprd_rg:clvaryonvg(2.271):datavg[updatefs:657] [[ /oracle/EPP/sapdata1 == None ]] +epprd_rg:clvaryonvg(2.272):datavg[updatefs:665] [[ /oracle/EPP/sapdata1 == /oracle/EPP/sapdata1 ]] +epprd_rg:clvaryonvg(2.272):datavg[updatefs:665] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:clvaryonvg(2.272):datavg[updatefs:685] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:clvaryonvg(2.272):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.272):datavg[updatefs:600] clodmget -q 'name = sapdata2lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.275):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.275):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.275):datavg[updatefs:607] /usr/sbin/getlvcb -f sapdata2lv +epprd_rg:clvaryonvg(2.293):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.294):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.294):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.294):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.294):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.294):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.294):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.294):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapdata2lv +epprd_rg:clvaryonvg(2.295):datavg[updatefs:625] getlvcb -T -A sapdata2lv +epprd_rg:clvaryonvg(2.295):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.298):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.301):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.303):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.316):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.316):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.316):datavg[updatefs:632] : Mount point in /etc/filesystems for sapdata2lv +epprd_rg:clvaryonvg(2.317):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapdata2lv$' /etc/filesystems +epprd_rg:clvaryonvg(2.320):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.321):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.324):datavg[updatefs:634] fs_mount_point=/oracle/EPP/sapdata2 +epprd_rg:clvaryonvg(2.324):datavg[updatefs:637] : CuAt label attribute for sapdata2lv +epprd_rg:clvaryonvg(2.324):datavg[updatefs:639] clodmget -q 'name = sapdata2lv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.328):datavg[updatefs:639] CuAt_label=/oracle/EPP/sapdata2 
+epprd_rg:clvaryonvg(2.329):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.330):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.333):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.333):datavg[updatefs:657] [[ -z /oracle/EPP/sapdata2 ]] +epprd_rg:clvaryonvg(2.333):datavg[updatefs:657] [[ /oracle/EPP/sapdata2 == None ]] +epprd_rg:clvaryonvg(2.334):datavg[updatefs:665] [[ /oracle/EPP/sapdata2 == /oracle/EPP/sapdata2 ]] +epprd_rg:clvaryonvg(2.334):datavg[updatefs:665] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:clvaryonvg(2.334):datavg[updatefs:685] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:clvaryonvg(2.334):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.334):datavg[updatefs:600] clodmget -q 'name = sapdata3lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.337):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.337):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.337):datavg[updatefs:607] /usr/sbin/getlvcb -f sapdata3lv +epprd_rg:clvaryonvg(2.354):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.354):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.354):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.355):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.355):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.355):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.355):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.355):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapdata3lv +epprd_rg:clvaryonvg(2.356):datavg[updatefs:625] getlvcb -T -A sapdata3lv +epprd_rg:clvaryonvg(2.356):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.359):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.362):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.364):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.376):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.376):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.376):datavg[updatefs:632] : Mount point in /etc/filesystems for sapdata3lv +epprd_rg:clvaryonvg(2.378):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapdata3lv$' /etc/filesystems +epprd_rg:clvaryonvg(2.380):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.382):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.385):datavg[updatefs:634] fs_mount_point=/oracle/EPP/sapdata3 +epprd_rg:clvaryonvg(2.385):datavg[updatefs:637] : CuAt label attribute for sapdata3lv +epprd_rg:clvaryonvg(2.385):datavg[updatefs:639] clodmget -q 'name = sapdata3lv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.389):datavg[updatefs:639] CuAt_label=/oracle/EPP/sapdata3 +epprd_rg:clvaryonvg(2.390):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.391):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.394):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.394):datavg[updatefs:657] [[ -z /oracle/EPP/sapdata3 ]] +epprd_rg:clvaryonvg(2.394):datavg[updatefs:657] [[ /oracle/EPP/sapdata3 == None ]] +epprd_rg:clvaryonvg(2.394):datavg[updatefs:665] [[ /oracle/EPP/sapdata3 == /oracle/EPP/sapdata3 ]]
+epprd_rg:clvaryonvg(2.394):datavg[updatefs:665] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:clvaryonvg(2.394):datavg[updatefs:685] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:clvaryonvg(2.394):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.395):datavg[updatefs:600] clodmget -q 'name = sapdata4lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.398):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.398):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.398):datavg[updatefs:607] /usr/sbin/getlvcb -f sapdata4lv +epprd_rg:clvaryonvg(2.415):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.415):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.415):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.415):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.415):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.415):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.415):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.415):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapdata4lv +epprd_rg:clvaryonvg(2.416):datavg[updatefs:625] getlvcb -T -A sapdata4lv +epprd_rg:clvaryonvg(2.417):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.420):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.423):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.425):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.438):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.438):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.438):datavg[updatefs:632] : Mount point in /etc/filesystems for sapdata4lv +epprd_rg:clvaryonvg(2.440):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapdata4lv$' /etc/filesystems +epprd_rg:clvaryonvg(2.442):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.444):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.447):datavg[updatefs:634] fs_mount_point=/oracle/EPP/sapdata4 +epprd_rg:clvaryonvg(2.447):datavg[updatefs:637] : CuAt label attribute for sapdata4lv +epprd_rg:clvaryonvg(2.447):datavg[updatefs:639] clodmget -q 'name = sapdata4lv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.450):datavg[updatefs:639] CuAt_label=/oracle/EPP/sapdata4 +epprd_rg:clvaryonvg(2.452):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.453):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.456):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.456):datavg[updatefs:657] [[ -z /oracle/EPP/sapdata4 ]] +epprd_rg:clvaryonvg(2.456):datavg[updatefs:657] [[ /oracle/EPP/sapdata4 == None ]] +epprd_rg:clvaryonvg(2.456):datavg[updatefs:665] [[ /oracle/EPP/sapdata4 == /oracle/EPP/sapdata4 ]] +epprd_rg:clvaryonvg(2.456):datavg[updatefs:665] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:clvaryonvg(2.456):datavg[updatefs:685] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:clvaryonvg(2.456):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.456):datavg[updatefs:600] clodmget -q 'name = boardlv and attribute = type and value = raw'
-f value -n CuAt +epprd_rg:clvaryonvg(2.459):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.459):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.459):datavg[updatefs:607] /usr/sbin/getlvcb -f boardlv +epprd_rg:clvaryonvg(2.477):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.477):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.477):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.477):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.477):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.477):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.477):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.477):datavg[updatefs:623] : Label and file system type from LVCB on disk for boardlv +epprd_rg:clvaryonvg(2.478):datavg[updatefs:625] getlvcb -T -A boardlv +epprd_rg:clvaryonvg(2.478):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.481):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.484):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.486):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.498):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.498):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.498):datavg[updatefs:632] : Mount point in /etc/filesystems for boardlv +epprd_rg:clvaryonvg(2.500):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/boardlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.502):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.504):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.508):datavg[updatefs:634] fs_mount_point=/board_org +epprd_rg:clvaryonvg(2.508):datavg[updatefs:637] : CuAt label attribute for boardlv +epprd_rg:clvaryonvg(2.508):datavg[updatefs:639] clodmget -q 'name = boardlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.512):datavg[updatefs:639] CuAt_label=/board_org +epprd_rg:clvaryonvg(2.515):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.517):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.520):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.520):datavg[updatefs:657] [[ -z /board_org ]] +epprd_rg:clvaryonvg(2.520):datavg[updatefs:657] [[ /board_org == None ]] +epprd_rg:clvaryonvg(2.520):datavg[updatefs:665] [[ /board_org == /board_org ]] +epprd_rg:clvaryonvg(2.521):datavg[updatefs:665] [[ /board_org != /board_org ]] +epprd_rg:clvaryonvg(2.521):datavg[updatefs:685] [[ /board_org != /board_org ]] +epprd_rg:clvaryonvg(2.521):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.521):datavg[updatefs:600] clodmget -q 'name = origlogAlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.524):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.524):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.524):datavg[updatefs:607] /usr/sbin/getlvcb -f origlogAlv +epprd_rg:clvaryonvg(2.540):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.540):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.540):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.540):datavg[updatefs:609] (( 0 != 0 
)) +epprd_rg:clvaryonvg(2.540):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.540):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.540):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.541):datavg[updatefs:623] : Label and file system type from LVCB on disk for origlogAlv +epprd_rg:clvaryonvg(2.541):datavg[updatefs:625] getlvcb -T -A origlogAlv +epprd_rg:clvaryonvg(2.542):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.545):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.548):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.550):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.562):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.562):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.562):datavg[updatefs:632] : Mount point in /etc/filesystems for origlogAlv +epprd_rg:clvaryonvg(2.563):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/origlogAlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.566):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.567):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.570):datavg[updatefs:634] fs_mount_point=/oracle/EPP/origlogA +epprd_rg:clvaryonvg(2.570):datavg[updatefs:637] : CuAt label attribute for origlogAlv +epprd_rg:clvaryonvg(2.570):datavg[updatefs:639] clodmget -q 'name = origlogAlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.574):datavg[updatefs:639] CuAt_label=/oracle/EPP/origlogA +epprd_rg:clvaryonvg(2.575):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.576):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.580):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.580):datavg[updatefs:657] [[ -z /oracle/EPP/origlogA ]] +epprd_rg:clvaryonvg(2.580):datavg[updatefs:657] [[ /oracle/EPP/origlogA == None ]] +epprd_rg:clvaryonvg(2.580):datavg[updatefs:665] [[ /oracle/EPP/origlogA == /oracle/EPP/origlogA ]] +epprd_rg:clvaryonvg(2.580):datavg[updatefs:665] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:clvaryonvg(2.580):datavg[updatefs:685] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:clvaryonvg(2.580):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.580):datavg[updatefs:600] clodmget -q 'name = origlogBlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.583):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.583):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.583):datavg[updatefs:607] /usr/sbin/getlvcb -f origlogBlv +epprd_rg:clvaryonvg(2.601):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.601):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.601):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.601):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.601):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.601):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.601):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.601):datavg[updatefs:623] : Label and file system 
type from LVCB on disk for origlogBlv +epprd_rg:clvaryonvg(2.602):datavg[updatefs:625] getlvcb -T -A origlogBlv +epprd_rg:clvaryonvg(2.602):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.605):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.608):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.610):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.623):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.623):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.623):datavg[updatefs:632] : Mount point in /etc/filesystems for origlogBlv +epprd_rg:clvaryonvg(2.624):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/origlogBlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.627):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.628):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.632):datavg[updatefs:634] fs_mount_point=/oracle/EPP/origlogB +epprd_rg:clvaryonvg(2.632):datavg[updatefs:637] : CuAt label attribute for origlogBlv +epprd_rg:clvaryonvg(2.632):datavg[updatefs:639] clodmget -q 'name = origlogBlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.635):datavg[updatefs:639] CuAt_label=/oracle/EPP/origlogB +epprd_rg:clvaryonvg(2.636):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.638):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.641):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.641):datavg[updatefs:657] [[ -z /oracle/EPP/origlogB ]] +epprd_rg:clvaryonvg(2.641):datavg[updatefs:657] [[ /oracle/EPP/origlogB == None ]] +epprd_rg:clvaryonvg(2.641):datavg[updatefs:665] [[ /oracle/EPP/origlogB == /oracle/EPP/origlogB ]] +epprd_rg:clvaryonvg(2.641):datavg[updatefs:665] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:clvaryonvg(2.641):datavg[updatefs:685] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:clvaryonvg(2.641):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.641):datavg[updatefs:600] clodmget -q 'name = mirrlogAlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.644):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.644):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.644):datavg[updatefs:607] /usr/sbin/getlvcb -f mirrlogAlv +epprd_rg:clvaryonvg(2.661):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.662):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.662):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.662):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.662):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.662):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.662):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.662):datavg[updatefs:623] : Label and file system type from LVCB on disk for mirrlogAlv +epprd_rg:clvaryonvg(2.663):datavg[updatefs:625] getlvcb -T -A mirrlogAlv +epprd_rg:clvaryonvg(2.663):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.666):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.669):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.671):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest 
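The getlvcb -T -A output is one 'attribute = value' pair per line; egrep keeps just the label and type lines, and, as the [[ jfs2 != jfs2 ]] tests that follow confirm, paste -s - - here folds the two matched lines into a single record so that one read can pick both values off by position (fields 3 and 6). The field gymnastics in isolation, fed an illustrative pre-joined record:

    print 'label = /oracle/EPP/origlogA type = jfs2' |
        read skip skip lvcb_label skip skip lvcb_type rest
    print "lvcb_label=$lvcb_label lvcb_type=$lvcb_type"
    # -> lvcb_label=/oracle/EPP/origlogA lvcb_type=jfs2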
+epprd_rg:clvaryonvg(2.684):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.684):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.684):datavg[updatefs:632] : Mount point in /etc/filesystems for mirrlogAlv +epprd_rg:clvaryonvg(2.685):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/mirrlogAlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.688):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.689):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.692):datavg[updatefs:634] fs_mount_point=/oracle/EPP/mirrlogA +epprd_rg:clvaryonvg(2.692):datavg[updatefs:637] : CuAt label attribute for mirrlogAlv +epprd_rg:clvaryonvg(2.693):datavg[updatefs:639] clodmget -q 'name = mirrlogAlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.696):datavg[updatefs:639] CuAt_label=/oracle/EPP/mirrlogA +epprd_rg:clvaryonvg(2.697):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.699):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.702):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.702):datavg[updatefs:657] [[ -z /oracle/EPP/mirrlogA ]] +epprd_rg:clvaryonvg(2.702):datavg[updatefs:657] [[ /oracle/EPP/mirrlogA == None ]] +epprd_rg:clvaryonvg(2.702):datavg[updatefs:665] [[ /oracle/EPP/mirrlogA == /oracle/EPP/mirrlogA ]] +epprd_rg:clvaryonvg(2.702):datavg[updatefs:665] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:clvaryonvg(2.702):datavg[updatefs:685] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:clvaryonvg(2.702):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.702):datavg[updatefs:600] clodmget -q 'name = mirrlogBlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.705):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.705):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.705):datavg[updatefs:607] /usr/sbin/getlvcb -f mirrlogBlv +epprd_rg:clvaryonvg(2.722):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.722):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.722):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.722):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.722):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.722):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.722):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.722):datavg[updatefs:623] : Label and file system type from LVCB on disk for mirrlogBlv +epprd_rg:clvaryonvg(2.723):datavg[updatefs:625] getlvcb -T -A mirrlogBlv +epprd_rg:clvaryonvg(2.724):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.727):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.730):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.732):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.744):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.744):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.744):datavg[updatefs:632] : Mount point in /etc/filesystems for mirrlogBlv +epprd_rg:clvaryonvg(2.746):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/mirrlogBlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.749):datavg[updatefs:634] cut -f1 -d: 
+epprd_rg:clvaryonvg(2.749):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.753):datavg[updatefs:634] fs_mount_point=/oracle/EPP/mirrlogB +epprd_rg:clvaryonvg(2.753):datavg[updatefs:637] : CuAt label attribute for mirrlogBlv +epprd_rg:clvaryonvg(2.753):datavg[updatefs:639] clodmget -q 'name = mirrlogBlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.757):datavg[updatefs:639] CuAt_label=/oracle/EPP/mirrlogB +epprd_rg:clvaryonvg(2.758):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.759):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.763):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.763):datavg[updatefs:657] [[ -z /oracle/EPP/mirrlogB ]] +epprd_rg:clvaryonvg(2.763):datavg[updatefs:657] [[ /oracle/EPP/mirrlogB == None ]] +epprd_rg:clvaryonvg(2.763):datavg[updatefs:665] [[ /oracle/EPP/mirrlogB == /oracle/EPP/mirrlogB ]] +epprd_rg:clvaryonvg(2.763):datavg[updatefs:665] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:clvaryonvg(2.763):datavg[updatefs:685] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:clvaryonvg(2.763):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.763):datavg[updatefs:600] clodmget -q 'name = epprdaloglv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.766):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.766):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.766):datavg[updatefs:607] /usr/sbin/getlvcb -f epprdaloglv +epprd_rg:clvaryonvg(2.783):datavg[updatefs:607] fs_info=' ' +epprd_rg:clvaryonvg(2.783):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.783):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.783):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.783):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.783):datavg[updatefs:618] [[ -z ' ' ]] +epprd_rg:clvaryonvg(2.783):datavg[updatefs:618] [[ ' ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.783):datavg[updatefs:620] continue +epprd_rg:clvaryonvg(2.784):datavg[1641] : At this point, the volume should be varied on, so get the current +epprd_rg:clvaryonvg(2.784):datavg[1642] : timestamp if needed +epprd_rg:clvaryonvg(2.784):datavg[1644] vgdatimestamps +epprd_rg:clvaryonvg(2.784):datavg[vgdatimestamps:201] PS4_FUNC=vgdatimestamps +epprd_rg:clvaryonvg(2.784):datavg[vgdatimestamps:201] typeset PS4_FUNC +epprd_rg:clvaryonvg(2.784):datavg[vgdatimestamps:202] [[ high == high ]] +epprd_rg:clvaryonvg(2.784):datavg[vgdatimestamps:202] set -x +epprd_rg:clvaryonvg(2.784):datavg[vgdatimestamps:203] set -u +epprd_rg:clvaryonvg(2.784):datavg[vgdatimestamps:206] : See what timestamp LVM has recorded from the last time it checked +epprd_rg:clvaryonvg(2.784):datavg[vgdatimestamps:207] : the disks +epprd_rg:clvaryonvg(2.784):datavg[vgdatimestamps:209] /usr/sbin/getlvodm -T 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(2.784):datavg[vgdatimestamps:209] 2> /dev/null +epprd_rg:clvaryonvg(2.787):datavg[vgdatimestamps:209] TS_FROM_ODM=63d4e4ec07aab272 +epprd_rg:clvaryonvg(2.787):datavg[vgdatimestamps:212] : Check to see if HACMP is maintaining a timestamp for this volume group +epprd_rg:clvaryonvg(2.787):datavg[vgdatimestamps:213] : Needed for some older volume groups +epprd_rg:clvaryonvg(2.787):datavg[vgdatimestamps:215] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.tstamp ]] +epprd_rg:clvaryonvg(2.787):datavg[vgdatimestamps:234] : Get the time stamp from the actual disk 
+epprd_rg:clvaryonvg(2.787):datavg[vgdatimestamps:236] clvgdats /dev/datavg +epprd_rg:clvaryonvg(2.788):datavg[vgdatimestamps:236] 2> /dev/null +epprd_rg:clvaryonvg(2.797):datavg[vgdatimestamps:236] TS_FROM_DISK=63d4e4ec07aab272 +epprd_rg:clvaryonvg(2.797):datavg[vgdatimestamps:237] clvgdats_rc=0 +epprd_rg:clvaryonvg(2.797):datavg[vgdatimestamps:238] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.797):datavg[vgdatimestamps:247] [[ -z 63d4e4ec07aab272 ]] +epprd_rg:clvaryonvg(2.797):datavg[1645] [[ -z 63d4e4ec07aab272 ]] +epprd_rg:clvaryonvg(2.797):datavg[1656] : Finally, leave the volume in the requested state - on or off +epprd_rg:clvaryonvg(2.797):datavg[1658] [[ FALSE == TRUE ]] +epprd_rg:clvaryonvg(2.797):datavg[1665] (( 0 == 0 )) +epprd_rg:clvaryonvg(2.797):datavg[1668] : Synchronize time stamps globally +epprd_rg:clvaryonvg(2.797):datavg[1670] cl_update_vg_odm_ts -o datavg +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[77] version=1.13 +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[121] o_flag='' +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[122] f_flag='' +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[123] getopts :of option +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[126] : Local timestamps should be good, since volume group was +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[127] : just varied on or off +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[128] o_flag=TRUE +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[123] getopts :of option +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[142] shift 1 +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[144] vg_name=datavg +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[145] [[ -z datavg ]] +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[151] shift +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[152] node_list='' +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[153] /usr/es/sbin/cluster/utilities/cl_get_path all +epprd_rg:cl_update_vg_odm_ts(0.004):datavg[153] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin +epprd_rg:cl_update_vg_odm_ts(0.004):datavg[155] [[ -z '' ]] +epprd_rg:cl_update_vg_odm_ts(0.004):datavg[158] : Check to see if this update is necessary - some LVM levels automatically +epprd_rg:cl_update_vg_odm_ts(0.004):datavg[159] : update volume group timestamps clusterwide.
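vgdatimestamps is deciding whether the local ODM still reflects what is on the disks: getlvodm -T reads the VGDA timestamp LVM has cached in the ODM, and clvgdats (the PowerHA helper used above) reads the timestamp from the VGDA itself. The two values match here (63d4e4ec07aab272 on both sides), so the varyon can stand as-is. A condensed ksh93 sketch of that decision, assuming the same helpers plus getlvodm -v to map the name to the VGID; the fallback to the HACMP-maintained datavg.tstamp file for older volume groups is omitted:

    VG=datavg
    VGID=$(getlvodm -v $VG)                                 # VG name -> VG identifier
    TS_FROM_ODM=$(/usr/sbin/getlvodm -T $VGID 2>/dev/null)  # timestamp cached in the ODM
    TS_FROM_DISK=$(clvgdats /dev/$VG 2>/dev/null)           # timestamp in the on-disk VGDA
    if [[ -z $TS_FROM_DISK ]]
    then
        print "cannot read a VGDA timestamp from disk"
    elif [[ $TS_FROM_ODM != "$TS_FROM_DISK" ]]
    then
        print "ODM is stale: definitions must be refreshed before use"
    else
        print "ODM matches the on-disk VGDA: varyon can stand as-is"
    fi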
+epprd_rg:cl_update_vg_odm_ts(0.004):datavg[163] instfix -iqk IV74100 +epprd_rg:cl_update_vg_odm_ts(0.005):datavg[163] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.012):datavg[164] instfix -iqk IV74883 +epprd_rg:cl_update_vg_odm_ts(0.012):datavg[164] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.019):datavg[165] instfix -iqk IV74698 +epprd_rg:cl_update_vg_odm_ts(0.020):datavg[165] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.026):datavg[166] instfix -iqk IV74246 +epprd_rg:cl_update_vg_odm_ts(0.027):datavg[166] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.033):datavg[174] emgr -l -L IV74883 +epprd_rg:cl_update_vg_odm_ts(0.034):datavg[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.303):datavg[174] emgr -l -L IV74698 +epprd_rg:cl_update_vg_odm_ts(0.304):datavg[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.571):datavg[174] emgr -l -L IV74246 +epprd_rg:cl_update_vg_odm_ts(0.571):datavg[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.820):datavg[183] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_update_vg_odm_ts(0.820):datavg[184] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_update_vg_odm_ts(0.820):datavg[185] : 99.99.999.999 +epprd_rg:cl_update_vg_odm_ts(0.820):datavg[187] typeset -li V R M F +epprd_rg:cl_update_vg_odm_ts(0.821):datavg[188] typeset -Z2 V +epprd_rg:cl_update_vg_odm_ts(0.821):datavg[189] typeset -Z2 R +epprd_rg:cl_update_vg_odm_ts(0.821):datavg[190] typeset -Z3 M +epprd_rg:cl_update_vg_odm_ts(0.821):datavg[191] typeset -Z3 F +epprd_rg:cl_update_vg_odm_ts(0.821):datavg[192] lvm_lvl6=601008015 +epprd_rg:cl_update_vg_odm_ts(0.821):datavg[192] typeset -li lvm_lvl6 +epprd_rg:cl_update_vg_odm_ts(0.821):datavg[194] lvm_lvl7=701003046 +epprd_rg:cl_update_vg_odm_ts(0.821):datavg[194] typeset -li lvm_lvl7 +epprd_rg:cl_update_vg_odm_ts(0.821):datavg[195] VRMF=0 +epprd_rg:cl_update_vg_odm_ts(0.821):datavg[195] typeset -li VRMF +epprd_rg:cl_update_vg_odm_ts(0.821):datavg[198] : Here try and figure out what level of LVM is installed +epprd_rg:cl_update_vg_odm_ts(0.822):datavg[200] lslpp -lcqOr bos.rte.lvm +epprd_rg:cl_update_vg_odm_ts(0.823):datavg[200] cut -f3 -d: +epprd_rg:cl_update_vg_odm_ts(0.823):datavg[200] read V R M F +epprd_rg:cl_update_vg_odm_ts(0.823):datavg[200] IFS=. +epprd_rg:cl_update_vg_odm_ts(0.824):datavg[201] VRMF=0702005101 +epprd_rg:cl_update_vg_odm_ts(0.824):datavg[203] (( 7 == 6 && 702005101 >= 601008015 )) +epprd_rg:cl_update_vg_odm_ts(0.824):datavg[204] (( 702005101 >= 701003046 )) +epprd_rg:cl_update_vg_odm_ts(0.824):datavg[207] : LVM at a level in which timestamp update is unnecessary +epprd_rg:cl_update_vg_odm_ts(0.824):datavg[209] return 0 +epprd_rg:clvaryonvg(3.626):datavg[1674] : On successful varyon, clean up any files used to track errors with +epprd_rg:clvaryonvg(3.626):datavg[1675] : this volume group +epprd_rg:clvaryonvg(3.626):datavg[1677] rm -f /usr/es/sbin/cluster/etc/vg/datavg.desc /usr/es/sbin/cluster/etc/vg/datavg.replay /usr/es/sbin/cluster/etc/vg/datavg.perms /usr/es/sbin/cluster/etc/vg/datavg.tstamp /usr/es/sbin/cluster/etc/vg/datavg.fail +epprd_rg:clvaryonvg(3.629):datavg[1680] : Note that a sync has not been done on the volume group at this point. +epprd_rg:clvaryonvg(3.629):datavg[1681] : A sync is kicked off in cl_sync_vgs, once any filesystem mounts are +epprd_rg:clvaryonvg(3.629):datavg[1682] : complete. 
A sync at this time would interfere with the mounts +epprd_rg:clvaryonvg(3.629):datavg[1685] return 0 +epprd_rg:cl_activate_vgs(3.713):datavg[vgs_chk:103] ERRMSG=$'cl_set_vg_fence_height[126]: version @(#)10\t1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37\ncl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY)\ncl_set_vg_fence_height[214]: read(datavg, 16)\ncl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)\ncl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0))' +epprd_rg:cl_activate_vgs(3.713):datavg[vgs_chk:104] RC=0 +epprd_rg:cl_activate_vgs(3.713):datavg[vgs_chk:107] (( 0 == 1 || 0 == 20 )) +epprd_rg:cl_activate_vgs(3.713):datavg[vgs_chk:115] : exit status of clvaryonvg -n datavg: 0 +epprd_rg:cl_activate_vgs(3.713):datavg[vgs_chk:117] [[ -n $'cl_set_vg_fence_height[126]: version @(#)10\t1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37\ncl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY)\ncl_set_vg_fence_height[214]: read(datavg, 16)\ncl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)\ncl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0))' ]] +epprd_rg:cl_activate_vgs(3.713):datavg[vgs_chk:117] (( 0 != 1 )) +epprd_rg:cl_activate_vgs(3.714):datavg[vgs_chk:119] cl_echo 286 $'cl_activate_vgs: Successful clvaryonvg of datavg with message cl_set_vg_fence_height[126]: version @(#)10\t1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37\ncl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY)\ncl_set_vg_fence_height[214]: read(datavg, 16)\ncl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)\ncl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0)).' 
cl_activate_vgs datavg 'cl_set_vg_fence_height[126]:' version '@(#)10' 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 'cl_set_vg_fence_height[180]:' 'open(/usr/es/sbin/cluster/etc/vg/datavg.uuid,' 'O_RDONLY)' 'cl_set_vg_fence_height[214]:' 'read(datavg,' '16)' 'cl_set_vg_fence_height[237]:' 'close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)' 'cl_set_vg_fence_height[265]:' 'sfwSetFenceGroup(vg=datavg' uuid=ec2db4422261eae02091227fb9e53c88 height='rw(0))' Jan 28 2023 18:03:43cl_activate_vgs: Successful clvaryonvg of datavg with message cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0)).+epprd_rg:cl_activate_vgs(3.733):datavg[vgs_chk:123] [[ 0 != 0 ]] +epprd_rg:cl_activate_vgs(3.733):datavg[vgs_chk:127] amlog_trace '' 'Activating Volume Group|datavg' +epprd_rg:cl_activate_vgs(3.733):datavg[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_vgs(3.733):datavg[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_vgs(3.758):datavg[amlog_trace:319] cltime +epprd_rg:cl_activate_vgs(3.761):datavg[amlog_trace:319] DATE=2023-01-28T18:03:43.599659 +epprd_rg:cl_activate_vgs(3.761):datavg[amlog_trace:320] echo '|2023-01-28T18:03:43.599659|INFO: Activating Volume Group|datavg' +epprd_rg:cl_activate_vgs(3.761):datavg[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_vgs(3.761):datavg[vgs_chk:132] echo datavg 0 +epprd_rg:cl_activate_vgs(3.761):datavg[vgs_chk:132] 1>> /tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs(3.761):datavg[vgs_chk:133] return 0 +epprd_rg:cl_activate_vgs:datavg[vgs_list:198] unset PS4_LOOP PS4_TIMER +epprd_rg:cl_activate_vgs[304] wait +epprd_rg:cl_activate_vgs[310] ALLNOERRVGS=All_nonerror_volume_groups +epprd_rg:cl_activate_vgs[311] cl_RMupdate resource_up All_nonerror_volume_groups cl_activate_vgs 2023-01-28T18:03:43.622702 2023-01-28T18:03:43.627172 +epprd_rg:cl_activate_vgs[318] [[ -f /tmp/_activate_vgs.tmp ]] +epprd_rg:cl_activate_vgs[320] grep ' 1' /tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs[329] rm -f /tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs[332] exit 0 +epprd_rg:process_resources[process_volume_groups:2584] RC=0 +epprd_rg:process_resources[process_volume_groups:2585] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[process_volume_groups:2598] (( 0 != 0 )) +epprd_rg:process_resources[process_volume_groups:2627] return 0 +epprd_rg:process_resources[process_volume_groups_main:2556] STAT=0 +epprd_rg:process_resources[process_volume_groups_main:2559] return 0 +epprd_rg:process_resources[3572] RC=0 +epprd_rg:process_resources[3573] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:03:43.645862 clrgpa +epprd_rg:clRGPA[+55] exit 0 
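The early return from cl_update_vg_odm_ts above ('timestamp update is unnecessary') rests on a compact version test: each component of the bos.rte.lvm V.R.M.F level is zero-padded to a fixed width (2+2+3+3 digits, enough for the stated maximum of 99.99.999.999), so the dotted level collapses into a single integer comparison; 7.2.5.101 becomes 0702005101, which clears the 701003046 (7.1.3.46) threshold past which LVM propagates volume group timestamps clusterwide by itself. The padding trick in isolation, mirroring the typeset attributes from the trace:

    typeset -li V R M F VRMF    # integers, as in the trace
    typeset -Z2 V R             # zero-fill version and release to 2 digits
    typeset -Z3 M F             # zero-fill modification and fix to 3 digits
    lvm_lvl7=701003046          # 7.1.3.46, the AIX 7 threshold used above

    # Field 3 of the colon-separated lslpp -lcqOr output is the fileset level
    lslpp -lcqOr bos.rte.lvm | cut -f3 -d: | IFS=. read V R M F
    VRMF=$V$R$M$F               # e.g. 7.2.5.101 -> 0702005101

    (( VRMF >= lvm_lvl7 )) && print 'LVM already syncs VG timestamps clusterwide'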
+epprd_rg:process_resources[3329] eval JOB_TYPE=LOGREDO ACTION=ACQUIRE VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' +epprd_rg:process_resources[1] JOB_TYPE=LOGREDO +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] VOLUME_GROUPS=datavg +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ LOGREDO == RELEASE ]] +epprd_rg:process_resources[3360] [[ LOGREDO == ONLINE ]] +epprd_rg:process_resources[3634] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[3635] logredo_volume_groups +epprd_rg:process_resources[logredo_volume_groups:2745] PS4_FUNC=logredo_volume_groups +epprd_rg:process_resources[logredo_volume_groups:2745] typeset PS4_FUNC +epprd_rg:process_resources(4.649)[logredo_volume_groups:2746] PS4_TIMER=true +epprd_rg:process_resources(4.649)[logredo_volume_groups:2746] typeset PS4_TIMER +epprd_rg:process_resources(4.649)[logredo_volume_groups:2747] [[ high == high ]] +epprd_rg:process_resources(4.649)[logredo_volume_groups:2747] set -x +epprd_rg:process_resources(4.649)[logredo_volume_groups:2749] TMP_FILE=/var/hacmp/log/.process_resources_logredo.28836342 +epprd_rg:process_resources(4.649)[logredo_volume_groups:2749] export TMP_FILE +epprd_rg:process_resources(4.649)[logredo_volume_groups:2750] rm -f '/var/hacmp/log/.process_resources_logredo*' +epprd_rg:process_resources(4.652)[logredo_volume_groups:2752] STAT=0 +epprd_rg:process_resources(4.652)[logredo_volume_groups:2755] export GROUPNAME +epprd_rg:process_resources(4.653)[logredo_volume_groups:2757] get_list_head datavg +epprd_rg:process_resources(4.654)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(4.654)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(4.654)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(4.654)[get_list_head:60] set -x +epprd_rg:process_resources(4.655)[get_list_head:61] echo datavg +epprd_rg:process_resources(4.655)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(4.655)[get_list_head:61] IFS=: +epprd_rg:process_resources(4.656)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(4.656)[get_list_head:62] echo datavg +epprd_rg:process_resources(4.654)[logredo_volume_groups:2757] read LIST_OF_VOLUME_GROUPS_FOR_RG +epprd_rg:process_resources(4.659)[logredo_volume_groups:2758] get_list_tail datavg +epprd_rg:process_resources(4.660)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(4.660)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(4.660)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(4.660)[get_list_tail:68] set -x +epprd_rg:process_resources(4.661)[get_list_tail:69] echo datavg +epprd_rg:process_resources(4.661)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(4.661)[get_list_tail:69] IFS=: +epprd_rg:process_resources(4.661)[get_list_tail:70] echo +epprd_rg:process_resources(4.659)[logredo_volume_groups:2758] read VOLUME_GROUPS +epprd_rg:process_resources(4.661)[logredo_volume_groups:2761] : Run logredo on all JFS/JFS2 log devices to assure FS consistency 
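clRGPA hands work back as colon-separated lists, one field per resource group, with comma-separated members inside each field; get_list_head and get_list_tail, whose bodies appear in the trace above, are all the parsing that format needs. Collected into runnable form, with an illustrative two-group list (this trace only carries datavg):

    get_list_head()    # first colon-field, commas turned into spaces
    {
        typeset listhead listtail
        echo $1 | IFS=: read listhead listtail
        echo $listhead | tr , ' '
    }

    get_list_tail()    # everything after the first colon-field
    {
        typeset listhead listtail
        echo $1 | IFS=: read listhead listtail
        echo $listtail
    }

    get_list_head 'datavg,appvg:backupvg'    # -> datavg appvg
    get_list_tail 'datavg,appvg:backupvg'    # -> backupvg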
+epprd_rg:process_resources(4.661)[logredo_volume_groups:2763] ALL_LVs='' +epprd_rg:process_resources(4.661)[logredo_volume_groups:2764] lv_all='' +epprd_rg:process_resources(4.661)[logredo_volume_groups:2765] mount_fs='' +epprd_rg:process_resources(4.661)[logredo_volume_groups:2766] fsck_check='' +epprd_rg:process_resources(4.661)[logredo_volume_groups:2767] MOUNTGUARD='' +epprd_rg:process_resources(4.661)[logredo_volume_groups:2768] FMMOUNT_OUT='' +epprd_rg:process_resources(4.661)[logredo_volume_groups:2769] FMMOUNT='' +epprd_rg:process_resources(4.663)[logredo_volume_groups:2772] tail +3 +epprd_rg:process_resources(4.663)[logredo_volume_groups:2772] lsvg -lL datavg +epprd_rg:process_resources(4.663)[logredo_volume_groups:2772] LC_ALL=C +epprd_rg:process_resources(4.664)[logredo_volume_groups:2772] 1>> /var/hacmp/log/.process_resources_logredo.28836342 +epprd_rg:process_resources(4.687)[logredo_volume_groups:2774] awk '{print $1}' +epprd_rg:process_resources(4.687)[logredo_volume_groups:2774] cat /var/hacmp/log/.process_resources_logredo.28836342 +epprd_rg:process_resources(4.692)[logredo_volume_groups:2774] ALL_LVs=$'epprdaloglv\nsaplv\nsapmntlv\noraclelv\nepplv\noraarchlv\nsapdata1lv\nsapdata2lv\nsapdata3lv\nsapdata4lv\nboardlv\noriglogAlv\noriglogBlv\nmirrlogAlv\nmirrlogBlv' +epprd_rg:process_resources(4.692)[logredo_volume_groups:2777] : Verify if any of the file systems associated with volume group datavg +epprd_rg:process_resources(4.692)[logredo_volume_groups:2778] : is already mounted anywhere else in the cluster. +epprd_rg:process_resources(4.692)[logredo_volume_groups:2779] : If it is already mounted somewhere else, we don't want to continue +epprd_rg:process_resources(4.692)[logredo_volume_groups:2780] : here to avoid data corruption. +epprd_rg:process_resources(4.694)[logredo_volume_groups:2782] awk '{print $1}' +epprd_rg:process_resources(4.694)[logredo_volume_groups:2782] cat /var/hacmp/log/.process_resources_logredo.28836342 +epprd_rg:process_resources(4.694)[logredo_volume_groups:2782] grep -v N/A +epprd_rg:process_resources(4.699)[logredo_volume_groups:2782] lv_all=$'saplv\nsapmntlv\noraclelv\nepplv\noraarchlv\nsapdata1lv\nsapdata2lv\nsapdata3lv\nsapdata4lv\nboardlv\noriglogAlv\noriglogBlv\nmirrlogAlv\nmirrlogBlv' +epprd_rg:process_resources(4.699)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.699)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.701)[logredo_volume_groups:2789] lsfs -qc saplv +epprd_rg:process_resources(4.702)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.702)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.702)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/saplv' was found in /etc/filesystems. +epprd_rg:process_resources(4.704)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.708)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.708)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.708)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node.
+epprd_rg:process_resources(4.708)[logredo_volume_groups:2795] fsdb saplv +epprd_rg:process_resources(4.709)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.712)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.714)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.715)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.715)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.720)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.720)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.720)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.720)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.720)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.722)[logredo_volume_groups:2789] lsfs -qc sapmntlv +epprd_rg:process_resources(4.722)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.723)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.723)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapmntlv' was found in /etc/filesystems. +epprd_rg:process_resources(4.725)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.729)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.729)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.729)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.729)[logredo_volume_groups:2795] fsdb sapmntlv +epprd_rg:process_resources(4.730)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.733)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.735)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.735)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.735)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.740)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.740)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.740)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.740)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.740)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.742)[logredo_volume_groups:2789] lsfs -qc oraclelv +epprd_rg:process_resources(4.743)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.743)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.743)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/oraclelv' was found in /etc/filesystems. +epprd_rg:process_resources(4.745)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.749)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.749)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:process_resources(4.749)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.749)[logredo_volume_groups:2795] fsdb oraclelv +epprd_rg:process_resources(4.750)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.753)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.755)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.756)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.756)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.761)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.761)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.761)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.761)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.761)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.763)[logredo_volume_groups:2789] lsfs -qc epplv +epprd_rg:process_resources(4.763)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.763)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.764)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/epplv' was found in /etc/filesystems. +epprd_rg:process_resources(4.765)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.769)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.769)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.769)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.769)[logredo_volume_groups:2795] fsdb epplv +epprd_rg:process_resources(4.770)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.774)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.776)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.776)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.776)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.781)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.781)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.781)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.781)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.781)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.783)[logredo_volume_groups:2789] lsfs -qc oraarchlv +epprd_rg:process_resources(4.783)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.784)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.784)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/oraarchlv' was found in /etc/filesystems. 
+epprd_rg:process_resources(4.786)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.790)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.790)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.790)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.790)[logredo_volume_groups:2795] fsdb oraarchlv +epprd_rg:process_resources(4.791)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.794)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.796)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.796)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.797)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.801)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.802)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.802)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.802)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.802)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.804)[logredo_volume_groups:2789] lsfs -qc sapdata1lv +epprd_rg:process_resources(4.804)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.804)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.805)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapdata1lv' was found in /etc/filesystems. +epprd_rg:process_resources(4.806)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.810)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.810)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.810)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.810)[logredo_volume_groups:2795] fsdb sapdata1lv +epprd_rg:process_resources(4.811)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.815)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.817)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.817)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.817)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.822)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.822)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.822)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.822)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.822)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
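Each file system in the group gets a two-part concurrent-mount probe before logredo: lsfs -qc reports whether MountGuard is set, and an fsdb superblock dump ('su' to show the superblock, 'q' to quit) is searched for the FM_MOUNT flag that marks a file system as mounted somewhere. Note that the probes in this trace receive bare LV names, so lsfs resolves them against the working directory (hence every '/var/hacmp/<lv>' complaint) and both flags come back empty, letting the checks fall through. A ksh93 sketch of one probe with an illustrative mount point; the subcommands are fed on stdin rather than through the traced heredoc, and the real script tests FMMOUNT against the literal 'yes':

    fs=/oracle/EPP/sapdata1    # illustrative; the trace passes LV names here

    # MountGuard setting from the colon-separated lsfs -qc record
    MOUNTGUARD=$(LC_ALL=C lsfs -qc $fs | tr : '\n' | grep -w MountGuard | cut '-d ' -f2)

    # Superblock dump via fsdb: 'su' prints the superblock, 'q' quits
    FMMOUNT_OUT=$(print 'su\nq' | fsdb $fs)
    FMMOUNT=$(echo "$FMMOUNT_OUT" | grep -w FM_MOUNT | awk '{ print $1 }')

    [[ -n $FMMOUNT ]] && print "$fs looks mounted elsewhere: skip it to avoid corruption"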
+epprd_rg:process_resources(4.824)[logredo_volume_groups:2789] lsfs -qc sapdata2lv +epprd_rg:process_resources(4.825)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.825)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.825)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapdata2lv' was found in /etc/filesystems. +epprd_rg:process_resources(4.827)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.831)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.831)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.831)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.831)[logredo_volume_groups:2795] fsdb sapdata2lv +epprd_rg:process_resources(4.832)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.835)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.837)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.837)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.838)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.842)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.843)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.843)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.843)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.843)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.845)[logredo_volume_groups:2789] lsfs -qc sapdata3lv +epprd_rg:process_resources(4.845)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.845)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.846)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapdata3lv' was found in /etc/filesystems. +epprd_rg:process_resources(4.847)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.851)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.851)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.851)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
+epprd_rg:process_resources(4.851)[logredo_volume_groups:2795] fsdb sapdata3lv +epprd_rg:process_resources(4.852)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.855)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.857)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.858)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.858)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.863)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.863)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.863)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.863)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.863)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.865)[logredo_volume_groups:2789] lsfs -qc sapdata4lv +epprd_rg:process_resources(4.865)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.865)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.866)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapdata4lv' was found in /etc/filesystems. +epprd_rg:process_resources(4.867)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.871)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.871)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.871)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.871)[logredo_volume_groups:2795] fsdb sapdata4lv +epprd_rg:process_resources(4.872)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.875)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.877)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.878)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.878)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.883)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.883)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.883)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.883)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.883)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.885)[logredo_volume_groups:2789] lsfs -qc boardlv +epprd_rg:process_resources(4.885)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.886)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.886)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/boardlv' was found in /etc/filesystems. +epprd_rg:process_resources(4.887)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.891)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.891)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:process_resources(4.891)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.892)[logredo_volume_groups:2795] fsdb boardlv +epprd_rg:process_resources(4.893)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.896)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.898)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.898)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.898)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.903)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.903)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.903)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.903)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.903)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.905)[logredo_volume_groups:2789] lsfs -qc origlogAlv +epprd_rg:process_resources(4.905)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.906)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.906)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/origlogAlv' was found in /etc/filesystems. +epprd_rg:process_resources(4.908)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.911)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.911)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.911)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.912)[logredo_volume_groups:2795] fsdb origlogAlv +epprd_rg:process_resources(4.913)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.916)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.918)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.918)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.919)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.923)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.923)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.924)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.924)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.924)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.926)[logredo_volume_groups:2789] lsfs -qc origlogBlv +epprd_rg:process_resources(4.926)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.926)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.927)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/origlogBlv' was found in /etc/filesystems. 
+epprd_rg:process_resources(4.928)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.932)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.932)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.932)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.932)[logredo_volume_groups:2795] fsdb origlogBlv +epprd_rg:process_resources(4.933)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.936)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.938)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.939)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.939)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.944)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.944)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.944)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.944)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.944)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.946)[logredo_volume_groups:2789] lsfs -qc mirrlogAlv +epprd_rg:process_resources(4.946)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.946)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.947)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/mirrlogAlv' was found in /etc/filesystems. +epprd_rg:process_resources(4.948)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.952)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.952)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.952)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.952)[logredo_volume_groups:2795] fsdb mirrlogAlv +epprd_rg:process_resources(4.953)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.956)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.958)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.959)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.959)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.964)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.964)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.964)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.964)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.964)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
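The probe repeated above runs once per logical volume: lsfs -qc prints the filesystem characteristics in colon form, tr splits the fields onto separate lines, and the MountGuard field is cut out. The "lsfs: No record matching" messages are expected here, because the probe is handed bare LV names rather than mount points, so nothing in /etc/filesystems matches and MOUNTGUARD stays empty. A minimal standalone form of the same probe, assuming a real mount point /some/fs (a placeholder name, not from this cluster):

    # Probe whether MountGuard is enabled on a filesystem (AIX ksh sketch).
    # /some/fs is a placeholder; the trace above passes LV names instead,
    # which is why its lsfs calls fail and MOUNTGUARD stays ''.
    MOUNTGUARD=$(LC_ALL=C lsfs -qc /some/fs | tr : '\n' | grep -w MountGuard | cut '-d ' -f2)
    [[ $MOUNTGUARD == yes ]] && echo "/some/fs is MountGuard protected"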
+epprd_rg:process_resources(4.966)[logredo_volume_groups:2789] lsfs -qc mirrlogBlv +epprd_rg:process_resources(4.966)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.966)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.967)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/mirrlogBlv' was found in /etc/filesystems. +epprd_rg:process_resources(4.968)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.972)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.972)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.972)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.972)[logredo_volume_groups:2795] fsdb mirrlogBlv +epprd_rg:process_resources(4.973)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.977)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.979)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.979)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.979)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.984)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.984)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.984)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.984)[logredo_volume_groups:2814] comm_failure='' +epprd_rg:process_resources(4.984)[logredo_volume_groups:2815] rc_mount='' +epprd_rg:process_resources(4.984)[logredo_volume_groups:2816] [[ -n '' ]] +epprd_rg:process_resources(4.984)[logredo_volume_groups:2851] logdevs='' +epprd_rg:process_resources(4.984)[logredo_volume_groups:2852] HAVE_GEO='' +epprd_rg:process_resources(4.984)[logredo_volume_groups:2853] lslpp -l 'hageo.*' +epprd_rg:process_resources(4.985)[logredo_volume_groups:2853] 1> /dev/null 2>& 1 +epprd_rg:process_resources(4.988)[logredo_volume_groups:2854] lslpp -l 'geoRM.*' +epprd_rg:process_resources(4.989)[logredo_volume_groups:2854] 1> /dev/null 2>& 1 +epprd_rg:process_resources(4.992)[logredo_volume_groups:2874] pattern='jfs*log' +epprd_rg:process_resources(4.992)[logredo_volume_groups:2876] : Any device with the type as log should be added +epprd_rg:process_resources(4.992)[logredo_volume_groups:2882] odmget -q $'name = epprdaloglv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(4.996)[logredo_volume_groups:2882] [[ -n $'\nCuAt:\n\tname = "epprdaloglv"\n\tattribute = "type"\n\tvalue = "jfs2log"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(4.996)[logredo_volume_groups:2884] logdevs=' /dev/epprdaloglv' +epprd_rg:process_resources(4.996)[logredo_volume_groups:2882] odmget -q $'name = saplv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(4.999)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(4.999)[logredo_volume_groups:2882] odmget -q $'name = sapmntlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.003)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.003)[logredo_volume_groups:2882] odmget -q $'name = oraclelv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.006)[logredo_volume_groups:2882] [[ -n 
'' ]] +epprd_rg:process_resources(5.006)[logredo_volume_groups:2882] odmget -q $'name = epplv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.010)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.010)[logredo_volume_groups:2882] odmget -q $'name = oraarchlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.013)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.013)[logredo_volume_groups:2882] odmget -q $'name = sapdata1lv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.017)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.017)[logredo_volume_groups:2882] odmget -q $'name = sapdata2lv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.020)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.020)[logredo_volume_groups:2882] odmget -q $'name = sapdata3lv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.024)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.024)[logredo_volume_groups:2882] odmget -q $'name = sapdata4lv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.027)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.027)[logredo_volume_groups:2882] odmget -q $'name = boardlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.031)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.031)[logredo_volume_groups:2882] odmget -q $'name = origlogAlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.034)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.034)[logredo_volume_groups:2882] odmget -q $'name = origlogBlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.038)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.038)[logredo_volume_groups:2882] odmget -q $'name = mirrlogAlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.041)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.042)[logredo_volume_groups:2882] odmget -q $'name = mirrlogBlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.045)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.045)[logredo_volume_groups:2889] : JFS2 file systems can have inline logs where the log LV is the same as the FS LV. 
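The loop above collects every LV whose ODM type marks it as a JFS or JFS2 log device; in this configuration only epprdaloglv matched, so logdevs holds just /dev/epprdaloglv going into the inline-log pass that follows. The equivalent one-off query, taken straight from the trace:

    # Ask the ODM (CuAt class) whether an LV is typed as a jfs/jfs2 log device.
    odmget -q "name = epprdaloglv and attribute = type and value like jfs*log" CuAt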
+epprd_rg:process_resources(5.045)[logredo_volume_groups:2895] odmget $'-qname = epprdaloglv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.049)[logredo_volume_groups:2895] [[ -n '' ]] +epprd_rg:process_resources(5.049)[logredo_volume_groups:2895] odmget $'-qname = saplv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.052)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "saplv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.054)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.054)[logredo_volume_groups:2898] odmget -q 'name = saplv and attribute = label' CuAt +epprd_rg:process_resources(5.058)[logredo_volume_groups:2898] [[ -n /usr/sap ]] +epprd_rg:process_resources(5.060)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.060)[logredo_volume_groups:2900] grep -wp /dev/saplv /etc/filesystems +epprd_rg:process_resources(5.065)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.065)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.065)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/saplv ]] +epprd_rg:process_resources(5.065)[logredo_volume_groups:2895] odmget $'-qname = sapmntlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.069)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapmntlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.071)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.071)[logredo_volume_groups:2898] odmget -q 'name = sapmntlv and attribute = label' CuAt +epprd_rg:process_resources(5.075)[logredo_volume_groups:2898] [[ -n /sapmnt ]] +epprd_rg:process_resources(5.077)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.077)[logredo_volume_groups:2900] grep -wp /dev/sapmntlv /etc/filesystems +epprd_rg:process_resources(5.082)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.082)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.082)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapmntlv ]] +epprd_rg:process_resources(5.082)[logredo_volume_groups:2895] odmget $'-qname = oraclelv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.086)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "oraclelv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.088)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.088)[logredo_volume_groups:2898] odmget -q 'name = oraclelv and attribute = label' CuAt +epprd_rg:process_resources(5.092)[logredo_volume_groups:2898] [[ -n /oracle ]] +epprd_rg:process_resources(5.094)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.094)[logredo_volume_groups:2900] grep -wp /dev/oraclelv /etc/filesystems +epprd_rg:process_resources(5.099)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.099)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] 
+epprd_rg:process_resources(5.099)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/oraclelv ]] +epprd_rg:process_resources(5.099)[logredo_volume_groups:2895] odmget $'-qname = epplv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.102)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "epplv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.104)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.104)[logredo_volume_groups:2898] odmget -q 'name = epplv and attribute = label' CuAt +epprd_rg:process_resources(5.109)[logredo_volume_groups:2898] [[ -n /oracle/EPP ]] +epprd_rg:process_resources(5.111)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.111)[logredo_volume_groups:2900] grep -wp /dev/epplv /etc/filesystems +epprd_rg:process_resources(5.116)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.116)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.116)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/epplv ]] +epprd_rg:process_resources(5.116)[logredo_volume_groups:2895] odmget $'-qname = oraarchlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.119)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "oraarchlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.121)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.121)[logredo_volume_groups:2898] odmget -q 'name = oraarchlv and attribute = label' CuAt +epprd_rg:process_resources(5.126)[logredo_volume_groups:2898] [[ -n /oracle/EPP/oraarch ]] +epprd_rg:process_resources(5.128)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.128)[logredo_volume_groups:2900] grep -wp /dev/oraarchlv /etc/filesystems +epprd_rg:process_resources(5.133)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.133)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.133)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/oraarchlv ]] +epprd_rg:process_resources(5.133)[logredo_volume_groups:2895] odmget $'-qname = sapdata1lv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.136)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapdata1lv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.138)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.138)[logredo_volume_groups:2898] odmget -q 'name = sapdata1lv and attribute = label' CuAt +epprd_rg:process_resources(5.143)[logredo_volume_groups:2898] [[ -n /oracle/EPP/sapdata1 ]] +epprd_rg:process_resources(5.145)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.145)[logredo_volume_groups:2900] grep -wp /dev/sapdata1lv /etc/filesystems +epprd_rg:process_resources(5.150)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.150)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.150)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapdata1lv ]] 
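Each JFS2 iteration here resolves the LV label (its mount point) from CuAt, then reads the log device out of the matching /etc/filesystems stanza: AIX grep -p prints the whole paragraph containing the device name, and awk picks the value of its log attribute. A condensed sketch for one LV, using saplv from the trace (names and paths will differ on other systems):

    LV=saplv
    # Mount point, recorded as the LV label in the ODM.
    LABEL=$(odmget -q "name = $LV and attribute = label" CuAt | sed -n '/value =/s/^.*"\(.*\)".*/\1/p')
    # Log device named in the /etc/filesystems stanza for this LV.
    LOG=$(grep -wp /dev/$LV /etc/filesystems | awk '$1 ~ /log/ {printf $3}')
    # INLINE, or a log device equal to the FS LV itself, marks an inline log.
    [[ $LOG == INLINE || $LOG == /dev/$LV ]] && echo "$LABEL uses an inline log"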
+epprd_rg:process_resources(5.150)[logredo_volume_groups:2895] odmget $'-qname = sapdata2lv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.153)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapdata2lv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.155)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.155)[logredo_volume_groups:2898] odmget -q 'name = sapdata2lv and attribute = label' CuAt +epprd_rg:process_resources(5.159)[logredo_volume_groups:2898] [[ -n /oracle/EPP/sapdata2 ]] +epprd_rg:process_resources(5.162)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.162)[logredo_volume_groups:2900] grep -wp /dev/sapdata2lv /etc/filesystems +epprd_rg:process_resources(5.167)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.167)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.167)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapdata2lv ]] +epprd_rg:process_resources(5.167)[logredo_volume_groups:2895] odmget $'-qname = sapdata3lv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.170)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapdata3lv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.172)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.172)[logredo_volume_groups:2898] odmget -q 'name = sapdata3lv and attribute = label' CuAt +epprd_rg:process_resources(5.176)[logredo_volume_groups:2898] [[ -n /oracle/EPP/sapdata3 ]] +epprd_rg:process_resources(5.178)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.178)[logredo_volume_groups:2900] grep -wp /dev/sapdata3lv /etc/filesystems +epprd_rg:process_resources(5.183)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.184)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.184)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapdata3lv ]] +epprd_rg:process_resources(5.184)[logredo_volume_groups:2895] odmget $'-qname = sapdata4lv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.187)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapdata4lv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.189)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.189)[logredo_volume_groups:2898] odmget -q 'name = sapdata4lv and attribute = label' CuAt +epprd_rg:process_resources(5.193)[logredo_volume_groups:2898] [[ -n /oracle/EPP/sapdata4 ]] +epprd_rg:process_resources(5.195)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.195)[logredo_volume_groups:2900] grep -wp /dev/sapdata4lv /etc/filesystems +epprd_rg:process_resources(5.200)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.200)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.200)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapdata4lv ]] +epprd_rg:process_resources(5.201)[logredo_volume_groups:2895] odmget $'-qname = boardlv and 
\t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.204)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "boardlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.206)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.206)[logredo_volume_groups:2898] odmget -q 'name = boardlv and attribute = label' CuAt +epprd_rg:process_resources(5.210)[logredo_volume_groups:2898] [[ -n /board_org ]] +epprd_rg:process_resources(5.212)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.212)[logredo_volume_groups:2900] grep -wp /dev/boardlv /etc/filesystems +epprd_rg:process_resources(5.217)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.217)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.217)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/boardlv ]] +epprd_rg:process_resources(5.218)[logredo_volume_groups:2895] odmget $'-qname = origlogAlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.221)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "origlogAlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.223)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.223)[logredo_volume_groups:2898] odmget -q 'name = origlogAlv and attribute = label' CuAt +epprd_rg:process_resources(5.227)[logredo_volume_groups:2898] [[ -n /oracle/EPP/origlogA ]] +epprd_rg:process_resources(5.229)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.229)[logredo_volume_groups:2900] grep -wp /dev/origlogAlv /etc/filesystems +epprd_rg:process_resources(5.234)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.234)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.234)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/origlogAlv ]] +epprd_rg:process_resources(5.235)[logredo_volume_groups:2895] odmget $'-qname = origlogBlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.238)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "origlogBlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.240)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.240)[logredo_volume_groups:2898] odmget -q 'name = origlogBlv and attribute = label' CuAt +epprd_rg:process_resources(5.244)[logredo_volume_groups:2898] [[ -n /oracle/EPP/origlogB ]] +epprd_rg:process_resources(5.246)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.246)[logredo_volume_groups:2900] grep -wp /dev/origlogBlv /etc/filesystems +epprd_rg:process_resources(5.251)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.251)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.251)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/origlogBlv ]] +epprd_rg:process_resources(5.251)[logredo_volume_groups:2895] odmget $'-qname = mirrlogAlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.255)[logredo_volume_groups:2895] [[ 
-n $'\nCuAt:\n\tname = "mirrlogAlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.257)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.257)[logredo_volume_groups:2898] odmget -q 'name = mirrlogAlv and attribute = label' CuAt +epprd_rg:process_resources(5.261)[logredo_volume_groups:2898] [[ -n /oracle/EPP/mirrlogA ]] +epprd_rg:process_resources(5.264)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.264)[logredo_volume_groups:2900] grep -wp /dev/mirrlogAlv /etc/filesystems +epprd_rg:process_resources(5.269)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.269)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.269)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/mirrlogAlv ]] +epprd_rg:process_resources(5.269)[logredo_volume_groups:2895] odmget $'-qname = mirrlogBlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.272)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "mirrlogBlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.274)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.274)[logredo_volume_groups:2898] odmget -q 'name = mirrlogBlv and attribute = label' CuAt +epprd_rg:process_resources(5.278)[logredo_volume_groups:2898] [[ -n /oracle/EPP/mirrlogB ]] +epprd_rg:process_resources(5.280)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.281)[logredo_volume_groups:2900] grep -wp /dev/mirrlogBlv /etc/filesystems +epprd_rg:process_resources(5.285)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.286)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.286)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/mirrlogBlv ]] +epprd_rg:process_resources(5.286)[logredo_volume_groups:2910] : Remove any duplicates acquired so far +epprd_rg:process_resources(5.288)[logredo_volume_groups:2912] echo /dev/epprdaloglv +epprd_rg:process_resources(5.288)[logredo_volume_groups:2912] tr ' ' '\n' +epprd_rg:process_resources(5.289)[logredo_volume_groups:2912] sort -u +epprd_rg:process_resources(5.295)[logredo_volume_groups:2912] logdevs=/dev/epprdaloglv +epprd_rg:process_resources(5.295)[logredo_volume_groups:2915] : Run logredos in parallel to save time. +epprd_rg:process_resources(5.295)[logredo_volume_groups:2919] [[ -n '' ]] +epprd_rg:process_resources(5.295)[logredo_volume_groups:2944] : Run logredo only if the LV is closed. +epprd_rg:process_resources(5.295)[logredo_volume_groups:2946] awk '$1 ~ /^epprdaloglv$/ && $6 ~ /closed\// {print "CLOSED"}' /var/hacmp/log/.process_resources_logredo.28836342 +epprd_rg:process_resources(5.300)[logredo_volume_groups:2946] [[ -n CLOSED ]] +epprd_rg:process_resources(5.300)[logredo_volume_groups:2949] : Run logredo only if filesystem is not mounted on any of the node in the cluster. 
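Two gates precede the actual logredo below: the log LV must be closed, and the filesystem must not be mounted elsewhere in the cluster. The closed-LV test reads a per-PID snapshot file and checks the sixth column for a closed/ state; the column layout ($1 LV name, $6 LV state such as closed/syncd) suggests the snapshot was captured from lsvg -l output, though that is an inference from this trace, not something the log states. A sketch under that assumption:

    # Run logredo only when the log LV is closed (state column shows closed/...).
    # LVLIST is assumed to hold "lsvg -l"-style output, per the column layout.
    LVLIST=/var/hacmp/log/.process_resources_logredo.$$
    if [[ -n $(awk '$1 ~ /^epprdaloglv$/ && $6 ~ /closed\// {print "CLOSED"}' "$LVLIST") ]]
    then
        logredo /dev/epprdaloglv
    fi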
+epprd_rg:process_resources(5.300)[logredo_volume_groups:2951] [[ -z '' ]] +epprd_rg:process_resources(5.301)[logredo_volume_groups:2958] rm -f /var/hacmp/log/.process_resources_logredo.28836342 +epprd_rg:process_resources(5.301)[logredo_volume_groups:2953] logredo /dev/epprdaloglv +epprd_rg:process_resources(5.305)[logredo_volume_groups:2962] : Wait for the background logredos from the RGs +epprd_rg:process_resources(5.305)[logredo_volume_groups:2964] wait J2_LOGREDO:log redo processing for /dev/epprdaloglv +epprd_rg:process_resources(5.312)[logredo_volume_groups:2966] return 0 +epprd_rg:process_resources(5.312)[3324] true +epprd_rg:process_resources(5.312)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(5.312)[3328] set -a +epprd_rg:process_resources(5.312)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:03:44.328564 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(5.331)[3329] eval JOB_TYPE=FILESYSTEMS ACTION=ACQUIRE FILE_SYSTEMS='"/board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap"' RESOURCE_GROUPS='"epprd_rg' '"' FSCHECK_TOOLS='"fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck"' RECOVERY_METHODS='"sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential"' +epprd_rg:process_resources(5.331)[1] JOB_TYPE=FILESYSTEMS +epprd_rg:process_resources(5.331)[1] ACTION=ACQUIRE +epprd_rg:process_resources(5.331)[1] FILE_SYSTEMS=/board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap +epprd_rg:process_resources(5.331)[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources(5.331)[1] FSCHECK_TOOLS=fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck +epprd_rg:process_resources(5.331)[1] RECOVERY_METHODS=sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:process_resources(5.331)[3330] RC=0 +epprd_rg:process_resources(5.331)[3331] set +a +epprd_rg:process_resources(5.331)[3333] (( 0 != 0 )) +epprd_rg:process_resources(5.331)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(5.331)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(5.331)[3343] export GROUPNAME +epprd_rg:process_resources(5.331)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(5.331)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(5.331)[3360] [[ FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources(5.331)[3360] [[ FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources(5.331)[3482] process_file_systems ACQUIRE +epprd_rg:process_resources(5.331)[process_file_systems:2640] PS4_FUNC=process_file_systems +epprd_rg:process_resources(5.331)[process_file_systems:2640] typeset PS4_FUNC +epprd_rg:process_resources(5.331)[process_file_systems:2641] [[ high == high ]] +epprd_rg:process_resources(5.331)[process_file_systems:2641] set -x 
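The block above shows the dispatcher pattern process_resources uses: clRGPA prints KEY=VALUE pairs, set -a makes the following eval export them all (JOB_TYPE, ACTION, FILE_SYSTEMS, ...), and the trailing blank that sneaks into RESOURCE_GROUPS ('epprd_rg ') is trimmed at line 3342 before GROUPNAME is derived. A simplified sketch of that loop, not the shipped code; the JOB_TYPE == NONE terminator is an assumption about how the loop ends:

    while true
    do
        set -a                                 # export everything the eval defines
        eval $(clrgpa)                         # e.g. JOB_TYPE=FILESYSTEMS ACTION=ACQUIRE ...
        set +a
        RESOURCE_GROUPS=${RESOURCE_GROUPS%% }  # drop the trailing blank seen in the trace
        [[ $JOB_TYPE == NONE ]] && break       # assumed terminator, not shown in this trace
        # ... dispatch on $JOB_TYPE (FILESYSTEMS -> process_file_systems, etc.)
    done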
+epprd_rg:process_resources(5.331)[process_file_systems:2643] STAT=0 +epprd_rg:process_resources(5.331)[process_file_systems:2645] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources(5.331)[process_file_systems:2647] cl_activate_fs +epprd_rg:cl_activate_fs[819] version=1.1.8.5 +epprd_rg:cl_activate_fs[823] : Check for mounting OEM file systems +epprd_rg:cl_activate_fs[825] OEM_FS=false +epprd_rg:cl_activate_fs[826] (( 0 != 0 )) +epprd_rg:cl_activate_fs[832] STATUS=0 +epprd_rg:cl_activate_fs[832] typeset -li STATUS +epprd_rg:cl_activate_fs[833] EMULATE=REAL +epprd_rg:cl_activate_fs[836] : The environment variable MOUNT_WLMCNTRL_SELFMANAGE is referred inside mount. +epprd_rg:cl_activate_fs[837] : If this variable is set, few calls to wlmcntrl are skipped inside mount, which +epprd_rg:cl_activate_fs[838] : offers performance benefits. Hence we will export this variable if it is set +epprd_rg:cl_activate_fs[839] : in /etc/environment. +epprd_rg:cl_activate_fs[841] grep -w ^MOUNT_WLMCNTRL_SELFMANAGE /etc/environment +epprd_rg:cl_activate_fs[841] export eval +epprd_rg:cl_activate_fs[843] [[ -n FILESYSTEMS ]] +epprd_rg:cl_activate_fs[843] [[ FILESYSTEMS != GROUP ]] +epprd_rg:cl_activate_fs[846] : If JOB_TYPE is set, and it does not equal to GROUP, then +epprd_rg:cl_activate_fs[847] : we are processing for process_resources, which passes requests +epprd_rg:cl_activate_fs[848] : associaed with multiple resource groups through environment variables +epprd_rg:cl_activate_fs[850] activate_fs_process_resources +epprd_rg:cl_activate_fs[activate_fs_process_resources:716] [[ high == high ]] +epprd_rg:cl_activate_fs[activate_fs_process_resources:716] set -x +epprd_rg:cl_activate_fs[activate_fs_process_resources:718] ERRSTATUS=0 +epprd_rg:cl_activate_fs[activate_fs_process_resources:718] typeset -i ERRSTATUS +epprd_rg:cl_activate_fs[activate_fs_process_resources:719] RC=0 +epprd_rg:cl_activate_fs[activate_fs_process_resources:719] typeset -li RC +epprd_rg:cl_activate_fs[activate_fs_process_resources:742] export GROUPNAME +epprd_rg:cl_activate_fs[activate_fs_process_resources:745] : Get the file systems, recovery tool and procedure for this +epprd_rg:cl_activate_fs[activate_fs_process_resources:746] : resource group +epprd_rg:cl_activate_fs[activate_fs_process_resources:748] print /board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap +epprd_rg:cl_activate_fs[activate_fs_process_resources:748] read _RG_FILE_SYSTEMS FILE_SYSTEMS +epprd_rg:cl_activate_fs[activate_fs_process_resources:748] IFS=: +epprd_rg:cl_activate_fs[activate_fs_process_resources:749] print fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck +epprd_rg:cl_activate_fs[activate_fs_process_resources:749] read _RG_FSCHECK_TOOLS FSCHECK_TOOLS +epprd_rg:cl_activate_fs[activate_fs_process_resources:749] IFS=: +epprd_rg:cl_activate_fs[activate_fs_process_resources:750] print sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:cl_activate_fs[activate_fs_process_resources:750] read _RG_RECOVERY_METHODS RECOVERY_METHODS +epprd_rg:cl_activate_fs[activate_fs_process_resources:750] IFS=: +epprd_rg:cl_activate_fs[activate_fs_process_resources:753] : Since all file systems in a resource group use the same recovery 
+epprd_rg:cl_activate_fs[activate_fs_process_resources:754] : method and recovery means, just pick up the first one in the list +epprd_rg:cl_activate_fs[activate_fs_process_resources:756] print fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck +epprd_rg:cl_activate_fs[activate_fs_process_resources:756] read FSCHECK_TOOL rest +epprd_rg:cl_activate_fs[activate_fs_process_resources:756] IFS=, +epprd_rg:cl_activate_fs[activate_fs_process_resources:757] print sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:cl_activate_fs[activate_fs_process_resources:757] read RECOVERY_METHOD rest +epprd_rg:cl_activate_fs[activate_fs_process_resources:757] IFS=, +epprd_rg:cl_activate_fs[activate_fs_process_resources:760] : If there are any unmounted file systems for this resource group, go +epprd_rg:cl_activate_fs[activate_fs_process_resources:761] : recover and mount them. +epprd_rg:cl_activate_fs[activate_fs_process_resources:763] [[ -n /board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap ]] +epprd_rg:cl_activate_fs[activate_fs_process_resources:765] IFS=, +epprd_rg:cl_activate_fs[activate_fs_process_resources:765] set -- /board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap +epprd_rg:cl_activate_fs[activate_fs_process_resources:765] print /board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap +epprd_rg:cl_activate_fs[activate_fs_process_resources:765] RG_FILE_SYSTEMS='/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[activate_fs_process_resources:766] activate_fs_process_group sequential fsck '/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[activate_fs_process_group:362] PS4_LOOP='' +epprd_rg:cl_activate_fs[activate_fs_process_group:362] typeset PS4_LOOP +epprd_rg:cl_activate_fs[activate_fs_process_group:363] [[ high == high ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:363] set -x +epprd_rg:cl_activate_fs[activate_fs_process_group:365] typeset RECOVERY_METHOD FSCHECK_TOOL FILESYSTEMS +epprd_rg:cl_activate_fs[activate_fs_process_group:366] STATUS=0 +epprd_rg:cl_activate_fs[activate_fs_process_group:366] typeset -i STATUS +epprd_rg:cl_activate_fs[activate_fs_process_group:368] RECOVERY_METHOD=sequential +epprd_rg:cl_activate_fs[activate_fs_process_group:369] FSCHECK_TOOL=fsck +epprd_rg:cl_activate_fs[activate_fs_process_group:370] shift 2 +epprd_rg:cl_activate_fs[activate_fs_process_group:371] FILESYSTEMS='/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch 
/oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[activate_fs_process_group:372] comm_failure='' +epprd_rg:cl_activate_fs[activate_fs_process_group:372] typeset comm_failure +epprd_rg:cl_activate_fs[activate_fs_process_group:373] rc_mount='' +epprd_rg:cl_activate_fs[activate_fs_process_group:373] typeset rc_mount +epprd_rg:cl_activate_fs[activate_fs_process_group:376] : Filter out duplicates, and file systems which are already mounted +epprd_rg:cl_activate_fs[activate_fs_process_group:378] mounts_to_do '/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[mounts_to_do:283] tomount='/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[mounts_to_do:283] typeset tomount +epprd_rg:cl_activate_fs[mounts_to_do:286] : Get most current list of mounted filesystems +epprd_rg:cl_activate_fs[mounts_to_do:288] mount +epprd_rg:cl_activate_fs[mounts_to_do:288] 2> /dev/null +epprd_rg:cl_activate_fs[mounts_to_do:288] awk '$3 ~ /jfs2*$/ {print $2}' +epprd_rg:cl_activate_fs[mounts_to_do:288] paste -s - +epprd_rg:cl_activate_fs[mounts_to_do:288] mounted=$'/\t/usr\t/var\t/tmp\t/home\t/admin\t/opt\t/var/adm/ras/livedump\t/ptf' +epprd_rg:cl_activate_fs[mounts_to_do:288] typeset mounted +epprd_rg:cl_activate_fs[mounts_to_do:291] shift +epprd_rg:cl_activate_fs[mounts_to_do:294] typeset -A mountedArray tomountArray +epprd_rg:cl_activate_fs[mounts_to_do:295] typeset fs +epprd_rg:cl_activate_fs[mounts_to_do:298] : Create an associative array for each list, which +epprd_rg:cl_activate_fs[mounts_to_do:299] : has the side effect of dropping any duplicates +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/usr]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/var]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/tmp]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/home]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/admin]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/opt]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/var/adm/ras/livedump]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/ptf]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/board_org]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/mirrlogA]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/mirrlogB]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/oraarch]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/origlogA]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/origlogB]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/sapdata1]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/sapdata2]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/sapdata3]=1 
+epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/sapdata4]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/sapmnt]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/usr/sap]=1 +epprd_rg:cl_activate_fs[mounts_to_do:310] mounted='' +epprd_rg:cl_activate_fs[mounts_to_do:311] tomount='' +epprd_rg:cl_activate_fs[mounts_to_do:314] : expand fs from all tomountArray subscript names +epprd_rg:cl_activate_fs[mounts_to_do:316] set +u +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:329] : Print all subscript names which are all remaining mount +epprd_rg:cl_activate_fs[mounts_to_do:330] : points which have to be mounted +epprd_rg:cl_activate_fs[mounts_to_do:332] print /board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap +epprd_rg:cl_activate_fs[mounts_to_do:332] tr ' ' '\n' +epprd_rg:cl_activate_fs[mounts_to_do:332] sort -u +epprd_rg:cl_activate_fs[mounts_to_do:334] set -u +epprd_rg:cl_activate_fs[activate_fs_process_group:378] FILESYSTEMS=$'/board_org\n/oracle\n/oracle/EPP\n/oracle/EPP/mirrlogA\n/oracle/EPP/mirrlogB\n/oracle/EPP/oraarch\n/oracle/EPP/origlogA\n/oracle/EPP/origlogB\n/oracle/EPP/sapdata1\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata4\n/sapmnt\n/usr/sap' +epprd_rg:cl_activate_fs[activate_fs_process_group:379] [[ -z $'/board_org\n/oracle\n/oracle/EPP\n/oracle/EPP/mirrlogA\n/oracle/EPP/mirrlogB\n/oracle/EPP/oraarch\n/oracle/EPP/origlogA\n/oracle/EPP/origlogB\n/oracle/EPP/sapdata1\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata4\n/sapmnt\n/usr/sap' ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:385] : Get unique temporary file names by using the resource group and the +epprd_rg:cl_activate_fs[activate_fs_process_group:386] : current process ID +epprd_rg:cl_activate_fs[activate_fs_process_group:388] [[ -z epprd_rg ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:397] TMP_FILENAME=epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs[activate_fs_process_group:398] rm -f /tmp/epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs[activate_fs_process_group:401] : If FSCHECK_TOOL is null get from ODM +epprd_rg:cl_activate_fs[activate_fs_process_group:403] [[ -z fsck ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:408] print fsck +epprd_rg:cl_activate_fs[activate_fs_process_group:408] FSCHECK_TOOL=fsck +epprd_rg:cl_activate_fs[activate_fs_process_group:409] [[ fsck != fsck ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:416] : If RECOVERY_METHOD is null get from ODM +epprd_rg:cl_activate_fs[activate_fs_process_group:418] [[ -z sequential ]] 
+epprd_rg:cl_activate_fs[activate_fs_process_group:423] print sequential +epprd_rg:cl_activate_fs[activate_fs_process_group:423] RECOVERY_METHOD=sequential +epprd_rg:cl_activate_fs[activate_fs_process_group:424] [[ sequential != sequential ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:431] set -u +epprd_rg:cl_activate_fs[activate_fs_process_group:434] : If FSCHECK_TOOL is set to logredo, the logredo for each jfslog has +epprd_rg:cl_activate_fs[activate_fs_process_group:435] : already been done in get_disk_vg_fs, so we only need to do fsck check +epprd_rg:cl_activate_fs[activate_fs_process_group:436] : and recovery here before going on to do the mounts +epprd_rg:cl_activate_fs[activate_fs_process_group:438] [[ fsck == fsck ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:441] TOOL='/usr/sbin/fsck -f -p -o nologredo' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:445] PS4_LOOP=/board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:446] lsfs /board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:446] grep -w /board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:449] : Verify if any of the file system /board_org is already mounted anywhere +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] lsfs -qc /board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
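With MountGuard in play, the script double-checks the on-disk state before mounting: fsdb's su subcommand dumps the superblock, and only an FM_MOUNT marker means some node still has the filesystem mounted. In the dump below, s_flag carries J2_MOUNTGUARD and s_state shows FM_CLEAN, so the grep for FM_MOUNT comes back empty and the fsck and mount can proceed safely. The probe in isolation, for the /board_org filesystem from the trace:

    # Dump the superblock and look for the FM_MOUNT marker.
    FMMOUNT_OUT=$(fsdb /board_org <<\EOF
    su
    q
    EOF
    )
    FMMOUNT=$(echo "$FMMOUNT_OUT" | grep -w FM_MOUNT | awk '{ print $1 }')
    [[ -z $FMMOUNT ]] && echo "/board_org is not mounted anywhere: safe to fsck and mount"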
+epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:463] fsdb /board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/board_org\n\nFile System Size:\t\t10485032\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t16384\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000009ffd28\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t91\n[10] s_agsize:\t\t0x00004000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0013ffa5\n \t\t s_fsckpxd.address:\t1310629\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'boardl\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5832\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000000b5\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t181\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d880\t[52] last unmounted:\t0x63d4e41f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/board_org\n\nFile System Size:\t\t10485032\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t16384\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000009ffd28\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t91\n[10] s_agsize:\t\t0x00004000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0013ffa5\n \t\t s_fsckpxd.address:\t1310629\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'boardl\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5832\t[33] 
s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000000b5\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t181\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d880\t[52] last unmounted:\t0x63d4e41f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/boardlv The current volume is: /dev/boardlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:445] PS4_LOOP=/oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:446] lsfs /oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:446] grep -w /oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:449] : Verify if any of the file system /oracle is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] lsfs -qc /oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
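One quirk worth noticing in these iterations: MOUNTGUARD is captured as the literal string yes) rather than yes, because the space-delimited cut keeps lsfs's closing parenthesis, so the [[ 'yes)' == yes ]] test at activate_fs_process_group:469 never fires. The mount still proceeds safely here, since the FM_MOUNT marker is also absent, but a parse that tolerates the parenthesis would let the guard branch behave as intended. A sketch of such a parse, using /oracle from the trace:

    # Strip the stray ')' so the == yes comparison can succeed.
    MOUNTGUARD=$(LC_ALL=C lsfs -qc /oracle | tr : '\n' | grep -w MountGuard | cut '-d ' -f2 | tr -d ')')
    [[ $MOUNTGUARD == yes ]] && echo "MountGuard enabled on /oracle"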
+epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:463] fsdb /oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle\n\nFile System Size:\t\t41941352\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t65536\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000027ff968\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t211\n[10] s_agsize:\t\t0x00010000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x004fff2d\n \t\t s_fsckpxd.address:\t5242669\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'oracle\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5819\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000295\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t661\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d881\t[52] last unmounted:\t0x63d4e41f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle\n\nFile System Size:\t\t41941352\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t65536\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000027ff968\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t211\n[10] s_agsize:\t\t0x00010000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x004fff2d\n \t\t s_fsckpxd.address:\t5242669\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'oracle\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5819\t[33] s_time.tj_nsec:\t0x00000000\n[14] 
s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000295\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t661\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d881\t[52] last unmounted:\t0x63d4e41f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/oraclelv The current volume is: /dev/oraclelv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:446] lsfs /oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:446] grep -w /oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] lsfs -qc /oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:463] fsdb /oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP\n\nFile System Size:\t\t62912232\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t65536\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x0000000003bff6e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t291\n[10] s_agsize:\t\t0x00010000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0077fedd\n \t\t s_fsckpxd.address:\t7864029\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'epplv\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5824\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000003d5\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t981\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d881\t[52] last unmounted:\t0x63d4e41f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP\n\nFile System Size:\t\t62912232\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t65536\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x0000000003bff6e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t291\n[10] s_agsize:\t\t0x00010000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0077fedd\n \t\t s_fsckpxd.address:\t7864029\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] 
s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'epplv\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5824\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000003d5\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t981\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d881\t[52] last unmounted:\t0x63d4e41f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/epplv The current volume is: /dev/epplv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:446] lsfs /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:446] grep -w /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/mirrlogA is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
+epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] lsfs -qc /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:463] fsdb /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/mirrlogA\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'mirrlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5834\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d881\t[52] last unmounted:\t0x63d4e41e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/mirrlogA\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate 
blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'mirrlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5834\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d881\t[52] last unmounted:\t0x63d4e41e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/mirrlogAlv The current volume is: /dev/mirrlogAlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:446] lsfs /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:446] grep -w /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/mirrlogB is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:450] : else in the cluster. 
If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] lsfs -qc /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:463] fsdb /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/mirrlogB\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'mirrlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5835\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d881\t[52] last unmounted:\t0x63d4e41e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] 
unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/mirrlogB\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'mirrlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5835\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d881\t[52] last unmounted:\t0x63d4e41e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/mirrlogBlv The current volume is: /dev/mirrlogBlv Primary superblock is valid. 
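
Every filesystem in the resource group gets this same MountGuard probe before it is touched. As a reading aid, here is the probe from activate_fs_process_group:457 re-created as a standalone sketch; the mount point is illustrative, but the pipeline is exactly the one the trace above executes:

# Ask lsfs for the filesystem's extended attributes (-q) in colon-separated
# form (-c), split the fields onto separate lines, and keep the MountGuard one.
fs=/oracle/EPP/oraarch                      # illustrative target, not hard-coded in the script
MOUNTGUARD=$(LC_ALL=C lsfs -qc "$fs" | tr : '\n' | grep -w MountGuard | cut '-d ' -f2)
print -- "$MOUNTGUARD"                      # prints 'yes)' on these filesystems

Note that the extracted value carries a trailing ')' from lsfs's parenthesised attribute list, so the test traced at :469, [[ 'yes)' == yes ]], evaluates false for every filesystem here; execution falls through to the FMMOUNT test at :473 and the fsck at :508 still runs.
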
+epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:446] lsfs /oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:446] grep -w /oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/oraarch is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] lsfs -qc /oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
+epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:463] fsdb /oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/oraarch\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'oraarc\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d582e\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d882\t[52] last unmounted:\t0x63d4e41e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/oraarch\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'oraarc\'\n FM_CLEAN \t[32] 
s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d582e\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d882\t[52] last unmounted:\t0x63d4e41e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/oraarchlv The current volume is: /dev/oraarchlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:446] lsfs /oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:446] grep -w /oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/origlogA is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
+epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] lsfs -qc /oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:463] fsdb /oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/origlogA\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'origlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5832\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d882\t[52] last unmounted:\t0x63d4e41e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/origlogA\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate 
blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'origlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5832\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d882\t[52] last unmounted:\t0x63d4e41e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/origlogAlv The current volume is: /dev/origlogAlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:446] lsfs /oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:446] grep -w /oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/origlogB is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:450] : else in the cluster. 
If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] lsfs -qc /oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:463] fsdb /oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/origlogB\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'origlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5833\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d882\t[52] last unmounted:\t0x63d4e41d\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] 
unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/origlogB\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'origlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5833\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d882\t[52] last unmounted:\t0x63d4e41d\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/origlogBlv The current volume is: /dev/origlogBlv Primary superblock is valid. 
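
Alongside MountGuard, each pass reads the on-disk JFS2 superblock to confirm the filesystem is not mounted elsewhere and was unmounted cleanly. A condensed sketch of that probe and of the fsck that follows it, under the same illustrative assumptions as above (the fsdb subcommands 'su' and 'q' are the ones the trace feeds on stdin; every dump in this section reports FM_CLEAN rather than FM_MOUNT):

# Dump the superblock: 'su' shows it (the display_super prompt seen in the
# dumps above), 'q' quits fsdb.
fs=/oracle/EPP/sapdata1                     # illustrative mount point
FMMOUNT_OUT=$(fsdb "$fs" <<'EOF'
su
q
EOF
)
# FM_MOUNT in the superblock state means the filesystem is mounted somewhere.
FMMOUNT=$(echo "$FMMOUNT_OUT" | grep -w FM_MOUNT | awk '{ print $1 }')
if [[ -z $FMMOUNT ]]
then
    # Not mounted anywhere: verify the logical volume before mounting.
    # -p fixes minor problems without prompting and -o nologredo skips
    # replaying the JFS2 log; -f selects fsck's fast-check path
    # (flag readings per AIX fsck, given here only as an aid).
    /usr/sbin/fsck -f -p -o nologredo /dev/sapdata1lv   # illustrative LV name
fi

The [[ sequential == parallel ]] test traced at :503 shows the group's filesystem recovery method expanded to 'sequential', so each filesystem is checked and mounted one at a time, which is why this whole probe repeats verbatim for every mount point in epprd_rg.
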
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:446] lsfs /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:446] grep -w /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/sapdata1 is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] lsfs -qc /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:463] fsdb /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/sapdata1\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d582f\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d883\t[52] last unmounted:\t0x63d4e41d\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/sapdata1\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] 
s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d582f\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d883\t[52] last unmounted:\t0x63d4e41d\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapdata1lv The current volume is: /dev/sapdata1lv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:446] lsfs /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:446] grep -w /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/sapdata2 is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] lsfs -qc /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:463] fsdb /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/sapdata2\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5830\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d883\t[52] last unmounted:\t0x63d4e41d\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/sapdata2\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate 
blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5830\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d883\t[52] last unmounted:\t0x63d4e41d\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapdata2lv The current volume is: /dev/sapdata2lv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:446] lsfs /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:446] grep -w /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/sapdata3 is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:450] : else in the cluster. 
If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] lsfs -qc /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:463] fsdb /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/sapdata3\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5831\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d883\t[52] last unmounted:\t0x63d4e41d\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] 
unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/sapdata3\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5831\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d883\t[52] last unmounted:\t0x63d4e41d\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapdata3lv The current volume is: /dev/sapdata3lv Primary superblock is valid. 
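The fsdb exchange above is fully scriptable. A sketch of the same probe, assuming only that fsdb reads its subcommands from standard input as the trace's here-document shows (a pipe replaces the heredoc so the snippet stays self-contained):

    # "su" dumps the superblock and "q" quits. If the dump contains FM_MOUNT,
    # some node has the file system mounted; this trace shows FM_CLEAN instead.
    FMMOUNT_OUT=$(print -- 'su\nq' | fsdb /oracle/EPP/sapdata3)
    FMMOUNT=$(print -- "$FMMOUNT_OUT" | grep -w FM_MOUNT | awk '{ print $1 }')
    [[ -n $FMMOUNT ]] && print "/oracle/EPP/sapdata3 appears mounted on another node"

An empty FMMOUNT, as seen here for every file system, is what lets activation continue to the fsck step.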
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:446] lsfs /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:446] grep -w /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/sapdata4 is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] lsfs -qc /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:463] fsdb /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/sapdata4\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5831\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d883\t[52] last unmounted:\t0x63d4e41d\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/sapdata4\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] 
s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5831\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d883\t[52] last unmounted:\t0x63d4e41d\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapdata4lv The current volume is: /dev/sapdata4lv Primary superblock is valid. +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:445] PS4_LOOP=/sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:446] lsfs /sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:446] grep -w /sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:449] : Verify if any of the file system /sapmnt is already mounted anywhere +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] lsfs -qc /sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:463] fsdb /sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/sapmnt\n\nFile System Size:\t\t20970472\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t32768\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000013ffbe8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t131\n[10] s_agsize:\t\t0x00008000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0027ff7d\n \t\t s_fsckpxd.address:\t2621309\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapmnt\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5818\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000155\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t341\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d884\t[52] last unmounted:\t0x63d4e41c\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/sapmnt\n\nFile System Size:\t\t20970472\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t32768\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000013ffbe8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t131\n[10] s_agsize:\t\t0x00008000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0027ff7d\n \t\t s_fsckpxd.address:\t2621309\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] 
s_fpack:\t\t\'sapmnt\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5818\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000155\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t341\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d884\t[52] last unmounted:\t0x63d4e41c\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapmntlv The current volume is: /dev/sapmntlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:445] PS4_LOOP=/usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:446] lsfs /usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:446] grep -w /usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:449] : Verify if any of the file system /usr/sap is already mounted anywhere +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] lsfs -qc /usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:463] fsdb /usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/usr/sap\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'saplv\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5815\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d884\t[52] last unmounted:\t0x63d4e41c\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/usr/sap\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000002\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] 
s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'saplv\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5815\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4d884\t[52] last unmounted:\t0x63d4e41c\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/saplv The current volume is: /dev/saplv Primary superblock is valid. 
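All six checks above ran in the foreground because the [[ sequential == parallel ]] test failed each time. A guess at what the parallel branch would look like; only the fsck command line and the closing wait (next in the trace) are taken from the log, and the loop itself is an assumption:

    # Background one preen-style check per logical volume, then collect them.
    # Options exactly as traced: -f -p -o nologredo.
    for lv in sapdata1lv sapdata2lv sapdata3lv sapdata4lv sapmntlv saplv; do
        /usr/sbin/fsck -f -p -o nologredo /dev/$lv &
    done
    wait    # matches "Allow any backgrounded fsck operations to finish" below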
+epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:513] : Allow any backgrounded fsck operations to finish +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:515] wait +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:519] : Now attempt to mount all the file systems +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:521] ALLFS=All_filesystems +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:522] cl_RMupdate resource_acquiring All_filesystems cl_activate_fs 2023-01-28T18:03:45.108553 2023-01-28T18:03:45.112901 +epprd_rg:cl_activate_fs(0.773):/usr/sap[activate_fs_process_group:524] PS4_TIMER=true +epprd_rg:cl_activate_fs(0.773):/usr/sap[activate_fs_process_group:524] typeset PS4_TIMER +epprd_rg:cl_activate_fs(0.773):/board_org[activate_fs_process_group:527] PS4_LOOP=/board_org +epprd_rg:cl_activate_fs(0.773):/board_org[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(0.773):/board_org[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(0.773):/board_org[activate_fs_process_group:540] fs_mount /board_org fsck epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:69] FS=/board_org +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:81] : Here check to see if the information in /etc/filesystems for /board_org +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:84] : point in /etc/filesystems. 
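The consistency check just described compares the same mount point from three sources. A condensed sketch using the names from the /board_org pass that follows (ksh runs the final stage of a pipeline in the current shell, so read sets variables here):

    FS=/board_org
    # 1. /etc/filesystems, via lsfs: the second colon-field is the device.
    LC_ALL=C lsfs -c $FS 2>&1 | tail -1 | IFS=: read skip LV_dev_name vfs_type rest
    LV_name=${LV_dev_name##*/}                                    # boardlv
    # 2. The label stored in the on-disk LVCB.
    getlvcb -T -A $LV_name 2>&1 | grep -w 'label =' | read skip skip LVCB_label
    # 3. The label stored in the CuAt ODM.
    CuAt_label=$(clodmget -q "name = $LV_name and attribute = label" -f value -n CuAt)
    [[ $LVCB_label == "$FS" && $CuAt_label == "$FS" ]] || print "label mismatch for $FS"

As the traced comments note, a mismatch is only reported at this point; the real repair attempts happened earlier in clvaryonvg.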
+epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:86] lsfs -c /board_org +epprd_rg:cl_activate_fs(0.774):/board_org[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(0.779):/board_org[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(0.775):/board_org[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(0.779):/board_org[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(0.779):/board_org[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(0.780):/board_org[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(0.775):/board_org[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(0.781):/board_org[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(0.782):/board_org[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(0.782):/board_org[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:100] LV_name=boardlv +epprd_rg:cl_activate_fs(0.783):/board_org[fs_mount:101] getlvcb -T -A boardlv +epprd_rg:cl_activate_fs(0.784):/board_org[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(0.802):/board_org[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(0.784):/board_org[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.11 \n\t lvname = boardlv \n\t label = /board_org \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:47 2022\n \t time modified = Sat Jan 28 17:10:40 2023\n ' +epprd_rg:cl_activate_fs(0.802):/board_org[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(0.802):/board_org[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(0.803):/board_org[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(0.784):/board_org[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.11 \n\t lvname = boardlv \n\t label = /board_org \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:47 2022\n \t time modified = Sat Jan 28 17:10:40 2023\n ' +epprd_rg:cl_activate_fs(0.804):/board_org[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(0.805):/board_org[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(0.806):/board_org[fs_mount:115] clodmget -q 'name = boardlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(0.810):/board_org[fs_mount:115] CuAt_label=/board_org +epprd_rg:cl_activate_fs(0.810):/board_org[fs_mount:118] : At this point, if things are working correctly, /board_org from /etc/filesystems +epprd_rg:cl_activate_fs(0.810):/board_org[fs_mount:119] : should match /board_org from CuAt ODM and /board_org from the LVCB +epprd_rg:cl_activate_fs(0.810):/board_org[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(0.810):/board_org[fs_mount:121] : were done in clvaryonvg. 
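Every mount attempt is bracketed by records in /var/hacmp/availability/clavailability.log. A reconstruction of amlog_trace from the calls that follow; this is not the shipped function, and the argument handling is inferred:

    amlog_trace() {
        # $1: error tag (empty for INFO records); $2: message, e.g.
        # 'Activating Filesystem|/board_org'
        clcycle clavailability.log 1> /dev/null 2>&1    # rotate the log if needed
        DATE=$(cltime)                                  # e.g. 2023-01-28T18:03:45.197781
        echo "|$DATE|INFO: $2" 1>> /var/hacmp/availability/clavailability.log
    }

The result is one pipe-delimited line per event, exactly as the echo steps in the trace show.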
+epprd_rg:cl_activate_fs(0.810):/board_org[fs_mount:123] [[ /board_org != /board_org ]] +epprd_rg:cl_activate_fs(0.810):/board_org[fs_mount:128] [[ /board_org != /board_org ]] +epprd_rg:cl_activate_fs(0.810):/board_org[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(0.810):/board_org[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(0.810):/board_org[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(0.830):/board_org[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(0.830):/board_org[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(0.830):/board_org[fs_mount:160] amlog_trace '' 'Activating Filesystem|/board_org' +epprd_rg:cl_activate_fs(0.830):/board_org[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(0.830):/board_org[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(0.855):/board_org[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(0.857):/board_org[amlog_trace:319] DATE=2023-01-28T18:03:45.197781 +epprd_rg:cl_activate_fs(0.858):/board_org[amlog_trace:320] echo '|2023-01-28T18:03:45.197781|INFO: Activating Filesystem|/board_org' +epprd_rg:cl_activate_fs(0.858):/board_org[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(0.858):/board_org[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(0.860):/board_org[fs_mount:162] : Try to mount filesystem /board_org at Jan 28 18:03:45.000 +epprd_rg:cl_activate_fs(0.860):/board_org[fs_mount:163] mount /board_org +epprd_rg:cl_activate_fs(0.872):/board_org[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(0.872):/board_org[fs_mount:219] : On successful mount of a JFS2 file system, 
engage mountguard, +epprd_rg:cl_activate_fs(0.872):/board_org[fs_mount:220] : if we are running on an AIX level that supports it +epprd_rg:cl_activate_fs(0.872):/board_org[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/board_org' +epprd_rg:cl_activate_fs(0.872):/board_org[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(0.873):/board_org[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(0.897):/board_org[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(0.900):/board_org[amlog_trace:319] DATE=2023-01-28T18:03:45.240177 +epprd_rg:cl_activate_fs(0.900):/board_org[amlog_trace:320] echo '|2023-01-28T18:03:45.240177|INFO: Activating Filesystems completed|/board_org' +epprd_rg:cl_activate_fs(0.900):/board_org[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(0.900):/board_org[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(0.900):/board_org[fs_mount:226] : Each of the V, R, M and F fields is padded to fixed length, +epprd_rg:cl_activate_fs(0.900):/board_org[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(0.900):/board_org[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(0.900):/board_org[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(0.900):/board_org[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(0.900):/board_org[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(0.900):/board_org[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(0.900):/board_org[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(0.900):/board_org[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(0.901):/board_org[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(0.902):/board_org[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(0.904):/board_org[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(0.904):/board_org[fs_mount:236] IFS=. 
+epprd_rg:cl_activate_fs(0.904):/board_org[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(0.904):/board_org[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(0.904):/board_org[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(0.904):/board_org[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(0.904):/board_org[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(0.904):/board_org[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(0.784):/board_org[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.11 \n\t lvname = boardlv \n\t label = /board_org \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:47 2022\n \t time modified = Sat Jan 28 17:10:40 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(0.904):/board_org[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(0.904):/oracle[activate_fs_process_group:527] PS4_LOOP=/oracle +epprd_rg:cl_activate_fs(0.904):/oracle[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(0.904):/oracle[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(0.904):/oracle[activate_fs_process_group:540] fs_mount /oracle fsck epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:69] FS=/oracle +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:84] : point in /etc/filesystems. 
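The VRMF arithmetic in the /board_org pass above deserves a closer look: R is zero-padded to two digits and M and F to three, so any level compares correctly as a single integer. For the bos.rte.filesystem level seen in this trace, 7.2.5.102 becomes 702005102:

    typeset -li V R M F VRMF      # integers...
    typeset -Z2 R                 # ...but R prints zero-filled to 2 digits
    typeset -Z3 M F               # ...and M, F to 3 digits
    lslpp -lcqOr bos.rte.filesystem | cut -f3 -d: | IFS=. read V R M F
    VRMF=${V}${R}${M}${F}         # 7.2.5.102 -> 7 02 005 102 -> 702005102
    (( V == 7 && VRMF >= 701001000 )) && print "JFS2 mountguard available"

Since the LVCB attributes already contain mountguard=yes, the test at fs_mount line 247 finds nothing to do; re-engaging the guard would change the VG timestamp, which is why the comment says to run it only once.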
+epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:86] lsfs -c /oracle +epprd_rg:cl_activate_fs(0.905):/oracle[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(0.910):/oracle[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(0.906):/oracle[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle:/dev/oraclelv:jfs2:::41943040:rw:no:no' +epprd_rg:cl_activate_fs(0.911):/oracle[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(0.911):/oracle[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(0.912):/oracle[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(0.906):/oracle[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle:/dev/oraclelv:jfs2:::41943040:rw:no:no' +epprd_rg:cl_activate_fs(0.913):/oracle[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(0.914):/oracle[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(0.914):/oracle[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(0.914):/oracle[fs_mount:100] LV_name=oraclelv +epprd_rg:cl_activate_fs(0.914):/oracle[fs_mount:101] getlvcb -T -A oraclelv +epprd_rg:cl_activate_fs(0.915):/oracle[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(0.933):/oracle[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(0.915):/oracle[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.4 \n\t lvname = oraclelv \n\t label = /oracle \n\t machine id = 44AF14B00 \n\t number lps = 40 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:42 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(0.933):/oracle[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(0.933):/oracle[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(0.934):/oracle[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(0.915):/oracle[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.4 \n\t lvname = oraclelv \n\t label = /oracle \n\t machine id = 44AF14B00 \n\t number lps = 40 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:42 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(0.935):/oracle[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(0.936):/oracle[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(0.937):/oracle[fs_mount:115] clodmget -q 'name = oraclelv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(0.940):/oracle[fs_mount:115] CuAt_label=/oracle +epprd_rg:cl_activate_fs(0.940):/oracle[fs_mount:118] : At this point, if things are working correctly, /oracle from /etc/filesystems +epprd_rg:cl_activate_fs(0.940):/oracle[fs_mount:119] : should match /oracle from CuAt ODM and /oracle from the LVCB +epprd_rg:cl_activate_fs(0.940):/oracle[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(0.940):/oracle[fs_mount:121] : were done in clvaryonvg. 
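Before mounting, fs_mount asks clwparroot whether the resource group is WPAR-enabled; the whole subshell that follows boils down to one ODM query. A sketch of that lookup (loadWparName is the helper named in the trace):

    # Empty output means HACMPresource has no WPAR_NAME entry for this group,
    # which is the case throughout this log, so WPAR_ROOT stays ''.
    wparName=$(clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource)
    if [[ -z $wparName ]]; then
        WPAR_ROOT=''        # not a WPAR resource group: mount at the normal path
    fi

With WPAR_ROOT empty, the [[ -n '' ]] test at fs_mount line 144 fails and the file system is mounted directly on the node.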
+epprd_rg:cl_activate_fs(0.940):/oracle[fs_mount:123] [[ /oracle != /oracle ]] +epprd_rg:cl_activate_fs(0.940):/oracle[fs_mount:128] [[ /oracle != /oracle ]] +epprd_rg:cl_activate_fs(0.940):/oracle[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(0.940):/oracle[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(0.941):/oracle[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(0.960):/oracle[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(0.960):/oracle[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(0.960):/oracle[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle' +epprd_rg:cl_activate_fs(0.960):/oracle[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(0.961):/oracle[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(0.985):/oracle[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(0.988):/oracle[amlog_trace:319] DATE=2023-01-28T18:03:45.328010 +epprd_rg:cl_activate_fs(0.988):/oracle[amlog_trace:320] echo '|2023-01-28T18:03:45.328010|INFO: Activating Filesystem|/oracle' +epprd_rg:cl_activate_fs(0.988):/oracle[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(0.988):/oracle[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(0.990):/oracle[fs_mount:162] : Try to mount filesystem /oracle at Jan 28 18:03:45.000 +epprd_rg:cl_activate_fs(0.990):/oracle[fs_mount:163] mount /oracle +epprd_rg:cl_activate_fs(1.002):/oracle[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.002):/oracle[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.002):/oracle[fs_mount:220] : if we are 
running on an AIX level that supports it +epprd_rg:cl_activate_fs(1.002):/oracle[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle' +epprd_rg:cl_activate_fs(1.002):/oracle[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.003):/oracle[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.027):/oracle[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.029):/oracle[amlog_trace:319] DATE=2023-01-28T18:03:45.369615 +epprd_rg:cl_activate_fs(1.029):/oracle[amlog_trace:320] echo '|2023-01-28T18:03:45.369615|INFO: Activating Filesystems completed|/oracle' +epprd_rg:cl_activate_fs(1.029):/oracle[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.029):/oracle[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.029):/oracle[fs_mount:226] : Each of the V, R, M and F fields is padded to fixed length, +epprd_rg:cl_activate_fs(1.029):/oracle[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.029):/oracle[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.029):/oracle[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.029):/oracle[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.030):/oracle[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.030):/oracle[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.030):/oracle[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.030):/oracle[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.031):/oracle[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.031):/oracle[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.033):/oracle[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.033):/oracle[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(1.033):/oracle[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.033):/oracle[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.033):/oracle[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.033):/oracle[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.033):/oracle[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.033):/oracle[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(0.915):/oracle[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.4 \n\t lvname = oraclelv \n\t label = /oracle \n\t machine id = 44AF14B00 \n\t number lps = 40 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:42 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.033):/oracle[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.033):/oracle/EPP[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP +epprd_rg:cl_activate_fs(1.033):/oracle/EPP[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.033):/oracle/EPP[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.033):/oracle/EPP[activate_fs_process_group:540] fs_mount /oracle/EPP fsck epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(1.033):/oracle/EPP[fs_mount:69] FS=/oracle/EPP 
+epprd_rg:cl_activate_fs(1.033):/oracle/EPP[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:86] lsfs -c /oracle/EPP +epprd_rg:cl_activate_fs(1.035):/oracle/EPP[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.040):/oracle/EPP[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.035):/oracle/EPP[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP:/dev/epplv:jfs2:::62914560:rw:no:no' +epprd_rg:cl_activate_fs(1.040):/oracle/EPP[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.040):/oracle/EPP[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.041):/oracle/EPP[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.035):/oracle/EPP[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP:/dev/epplv:jfs2:::62914560:rw:no:no' +epprd_rg:cl_activate_fs(1.042):/oracle/EPP[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.042):/oracle/EPP[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.042):/oracle/EPP[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.044):/oracle/EPP[fs_mount:100] LV_name=epplv +epprd_rg:cl_activate_fs(1.044):/oracle/EPP[fs_mount:101] getlvcb -T -A epplv +epprd_rg:cl_activate_fs(1.045):/oracle/EPP[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.063):/oracle/EPP[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.045):/oracle/EPP[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.5 \n\t lvname = epplv \n\t label = /oracle/EPP \n\t machine id = 44AF14B00 \n\t number lps = 60 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(1.063):/oracle/EPP[fs_mount:102] RC=0 
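
The consistency check that follows compares three views of the same mount point: /etc/filesystems (lsfs -c), the on-disk LVCB (getlvcb -T -A), and the label attribute in the CuAt ODM (clodmget). A condensed sketch using the same commands as the trace; it relies on ksh93 running the last pipeline stage in the current shell, and omits recovery because clvaryonvg already made its best effort:

    FS=/oracle/EPP                                    # mount point under test
    lsfs -c $FS 2>&1 | tail -1 | IFS=: read skip LV_dev vfs_type rest
    LV_name=${LV_dev##*/}                             # /dev/epplv -> epplv
    getlvcb -T -A $LV_name 2>&1 | grep -w 'label =' | read skip skip LVCB_label
    CuAt_label=$(clodmget -q "name = $LV_name and attribute = label" -f value -n CuAt)
    if [[ $LVCB_label != $FS || $CuAt_label != $FS ]]
    then
        print -- "WARNING: label mismatch for $FS (LVCB=$LVCB_label CuAt=$CuAt_label)"
    fi
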
+epprd_rg:cl_activate_fs(1.063):/oracle/EPP[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.064):/oracle/EPP[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.045):/oracle/EPP[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.5 \n\t lvname = epplv \n\t label = /oracle/EPP \n\t machine id = 44AF14B00 \n\t number lps = 60 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(1.064):/oracle/EPP[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.066):/oracle/EPP[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.066):/oracle/EPP[fs_mount:115] clodmget -q 'name = epplv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.070):/oracle/EPP[fs_mount:115] CuAt_label=/oracle/EPP +epprd_rg:cl_activate_fs(1.070):/oracle/EPP[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP from /etc/filesystems +epprd_rg:cl_activate_fs(1.070):/oracle/EPP[fs_mount:119] : should match /oracle/EPP from CuAt ODM and /oracle/EPP from the LVCB +epprd_rg:cl_activate_fs(1.070):/oracle/EPP[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.070):/oracle/EPP[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.070):/oracle/EPP[fs_mount:123] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:cl_activate_fs(1.070):/oracle/EPP[fs_mount:128] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:cl_activate_fs(1.070):/oracle/EPP[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.070):/oracle/EPP[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.070):/oracle/EPP[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.090):/oracle/EPP[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.090):/oracle/EPP[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.090):/oracle/EPP[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP' +epprd_rg:cl_activate_fs(1.090):/oracle/EPP[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.091):/oracle/EPP[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.115):/oracle/EPP[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.117):/oracle/EPP[amlog_trace:319] DATE=2023-01-28T18:03:45.457500 +epprd_rg:cl_activate_fs(1.117):/oracle/EPP[amlog_trace:320] echo '|2023-01-28T18:03:45.457500|INFO: Activating Filesystem|/oracle/EPP' +epprd_rg:cl_activate_fs(1.117):/oracle/EPP[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.117):/oracle/EPP[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.120):/oracle/EPP[fs_mount:162] : Try to mount filesystem /oracle/EPP at Jan 28 18:03:45.000 +epprd_rg:cl_activate_fs(1.120):/oracle/EPP[fs_mount:163] mount /oracle/EPP +epprd_rg:cl_activate_fs(1.131):/oracle/EPP[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.132):/oracle/EPP[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.132):/oracle/EPP[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.132):/oracle/EPP[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP' +epprd_rg:cl_activate_fs(1.132):/oracle/EPP[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.132):/oracle/EPP[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.156):/oracle/EPP[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.159):/oracle/EPP[amlog_trace:319] 
DATE=2023-01-28T18:03:45.499272 +epprd_rg:cl_activate_fs(1.159):/oracle/EPP[amlog_trace:320] echo '|2023-01-28T18:03:45.499272|INFO: Activating Filesystems completed|/oracle/EPP' +epprd_rg:cl_activate_fs(1.159):/oracle/EPP[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.159):/oracle/EPP[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.159):/oracle/EPP[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.159):/oracle/EPP[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.159):/oracle/EPP[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.159):/oracle/EPP[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.159):/oracle/EPP[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.159):/oracle/EPP[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.159):/oracle/EPP[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.159):/oracle/EPP[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.159):/oracle/EPP[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.160):/oracle/EPP[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.161):/oracle/EPP[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.163):/oracle/EPP[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.163):/oracle/EPP[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(1.163):/oracle/EPP[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.163):/oracle/EPP[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.163):/oracle/EPP[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.163):/oracle/EPP[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.163):/oracle/EPP[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.163):/oracle/EPP[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.045):/oracle/EPP[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.5 \n\t lvname = epplv \n\t label = /oracle/EPP \n\t machine id = 44AF14B00 \n\t number lps = 60 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.163):/oracle/EPP[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[activate_fs_process_group:540] fs_mount /oracle/EPP/mirrlogA fsck epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:69] FS=/oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:71] 
TMP_FILENAME=epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:86] lsfs -c /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.164):/oracle/EPP/mirrlogA[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.169):/oracle/EPP/mirrlogA[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.165):/oracle/EPP/mirrlogA[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogA:/dev/mirrlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.169):/oracle/EPP/mirrlogA[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.169):/oracle/EPP/mirrlogA[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.170):/oracle/EPP/mirrlogA[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.165):/oracle/EPP/mirrlogA[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogA:/dev/mirrlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.171):/oracle/EPP/mirrlogA[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.172):/oracle/EPP/mirrlogA[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.172):/oracle/EPP/mirrlogA[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.173):/oracle/EPP/mirrlogA[fs_mount:100] LV_name=mirrlogAlv +epprd_rg:cl_activate_fs(1.173):/oracle/EPP/mirrlogA[fs_mount:101] getlvcb -T -A mirrlogAlv +epprd_rg:cl_activate_fs(1.174):/oracle/EPP/mirrlogA[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.193):/oracle/EPP/mirrlogA[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.174):/oracle/EPP/mirrlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.14 \n\t lvname = mirrlogAlv \n\t label = /oracle/EPP/mirrlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = 
Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(1.193):/oracle/EPP/mirrlogA[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.193):/oracle/EPP/mirrlogA[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.194):/oracle/EPP/mirrlogA[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.174):/oracle/EPP/mirrlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.14 \n\t lvname = mirrlogAlv \n\t label = /oracle/EPP/mirrlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(1.195):/oracle/EPP/mirrlogA[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.195):/oracle/EPP/mirrlogA[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.197):/oracle/EPP/mirrlogA[fs_mount:115] clodmget -q 'name = mirrlogAlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.200):/oracle/EPP/mirrlogA[fs_mount:115] CuAt_label=/oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.200):/oracle/EPP/mirrlogA[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/mirrlogA from /etc/filesystems +epprd_rg:cl_activate_fs(1.200):/oracle/EPP/mirrlogA[fs_mount:119] : should match /oracle/EPP/mirrlogA from CuAt ODM and /oracle/EPP/mirrlogA from the LVCB +epprd_rg:cl_activate_fs(1.200):/oracle/EPP/mirrlogA[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.200):/oracle/EPP/mirrlogA[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.200):/oracle/EPP/mirrlogA[fs_mount:123] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:cl_activate_fs(1.200):/oracle/EPP/mirrlogA[fs_mount:128] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:cl_activate_fs(1.200):/oracle/EPP/mirrlogA[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.200):/oracle/EPP/mirrlogA[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.200):/oracle/EPP/mirrlogA[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.220):/oracle/EPP/mirrlogA[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.220):/oracle/EPP/mirrlogA[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.220):/oracle/EPP/mirrlogA[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_activate_fs(1.220):/oracle/EPP/mirrlogA[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.221):/oracle/EPP/mirrlogA[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.245):/oracle/EPP/mirrlogA[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.248):/oracle/EPP/mirrlogA[amlog_trace:319] DATE=2023-01-28T18:03:45.588097 +epprd_rg:cl_activate_fs(1.248):/oracle/EPP/mirrlogA[amlog_trace:320] echo '|2023-01-28T18:03:45.588097|INFO: Activating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_activate_fs(1.248):/oracle/EPP/mirrlogA[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.248):/oracle/EPP/mirrlogA[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.250):/oracle/EPP/mirrlogA[fs_mount:162] : Try to mount filesystem /oracle/EPP/mirrlogA at Jan 28 18:03:45.000 +epprd_rg:cl_activate_fs(1.251):/oracle/EPP/mirrlogA[fs_mount:163] mount /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.262):/oracle/EPP/mirrlogA[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.262):/oracle/EPP/mirrlogA[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.262):/oracle/EPP/mirrlogA[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.262):/oracle/EPP/mirrlogA[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/mirrlogA' +epprd_rg:cl_activate_fs(1.262):/oracle/EPP/mirrlogA[amlog_trace:318] clcycle clavailability.log 
+epprd_rg:cl_activate_fs(1.263):/oracle/EPP/mirrlogA[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.287):/oracle/EPP/mirrlogA[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.290):/oracle/EPP/mirrlogA[amlog_trace:319] DATE=2023-01-28T18:03:45.630003 +epprd_rg:cl_activate_fs(1.290):/oracle/EPP/mirrlogA[amlog_trace:320] echo '|2023-01-28T18:03:45.630003|INFO: Activating Filesystems completed|/oracle/EPP/mirrlogA' +epprd_rg:cl_activate_fs(1.290):/oracle/EPP/mirrlogA[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.290):/oracle/EPP/mirrlogA[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.290):/oracle/EPP/mirrlogA[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.290):/oracle/EPP/mirrlogA[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.290):/oracle/EPP/mirrlogA[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.290):/oracle/EPP/mirrlogA[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.290):/oracle/EPP/mirrlogA[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.290):/oracle/EPP/mirrlogA[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.290):/oracle/EPP/mirrlogA[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.290):/oracle/EPP/mirrlogA[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.290):/oracle/EPP/mirrlogA[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.291):/oracle/EPP/mirrlogA[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.291):/oracle/EPP/mirrlogA[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.293):/oracle/EPP/mirrlogA[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.293):/oracle/EPP/mirrlogA[fs_mount:236] IFS=. 
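
Each amlog_trace call in this trace follows one pattern: rotate clavailability.log with clcycle, take a timestamp with cltime, and append a pipe-delimited record. A sketch of the record shape, paraphrased from the traced behavior rather than from the real amlog_trace source:

    # Paraphrase of the traced amlog_trace behavior (illustrative, not the real function).
    amlog_trace_sketch()
    {
        typeset msg=$1
        clcycle clavailability.log > /dev/null 2>&1       # rotate the availability log
        typeset DATE=$(cltime)                            # e.g. 2023-01-28T18:03:45.630003
        echo "|$DATE|INFO: $msg" >> /var/hacmp/availability/clavailability.log
    }

    amlog_trace_sketch 'Activating Filesystems completed|/oracle/EPP/mirrlogA'
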
+epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogA[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogA[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogA[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogA[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogA[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogA[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.174):/oracle/EPP/mirrlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.14 \n\t lvname = mirrlogAlv \n\t label = /oracle/EPP/mirrlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogA[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[activate_fs_process_group:540] fs_mount /oracle/EPP/mirrlogB fsck epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:69] FS=/oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.294):/oracle/EPP/mirrlogB[fs_mount:86] lsfs -c /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.295):/oracle/EPP/mirrlogB[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.300):/oracle/EPP/mirrlogB[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.295):/oracle/EPP/mirrlogB[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogB:/dev/mirrlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.300):/oracle/EPP/mirrlogB[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.300):/oracle/EPP/mirrlogB[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.301):/oracle/EPP/mirrlogB[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.295):/oracle/EPP/mirrlogB[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogB:/dev/mirrlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.302):/oracle/EPP/mirrlogB[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.302):/oracle/EPP/mirrlogB[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.303):/oracle/EPP/mirrlogB[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.304):/oracle/EPP/mirrlogB[fs_mount:100] LV_name=mirrlogBlv +epprd_rg:cl_activate_fs(1.304):/oracle/EPP/mirrlogB[fs_mount:101] getlvcb -T -A mirrlogBlv +epprd_rg:cl_activate_fs(1.305):/oracle/EPP/mirrlogB[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.322):/oracle/EPP/mirrlogB[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.305):/oracle/EPP/mirrlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.15 \n\t lvname = mirrlogBlv \n\t label = /oracle/EPP/mirrlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:50 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.322):/oracle/EPP/mirrlogB[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.322):/oracle/EPP/mirrlogB[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.323):/oracle/EPP/mirrlogB[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.305):/oracle/EPP/mirrlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.15 \n\t lvname = mirrlogBlv \n\t label = /oracle/EPP/mirrlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:50 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.324):/oracle/EPP/mirrlogB[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.326):/oracle/EPP/mirrlogB[fs_mount:115] clodmget -q 'name = mirrlogBlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.330):/oracle/EPP/mirrlogB[fs_mount:115] CuAt_label=/oracle/EPP/mirrlogB 
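
Before every mount, fs_mount also resolves a possible WPAR root through clwparroot, traced in full above for each filesystem. The decisive step is loadWparName's ODM query: an empty WPAR_NAME means the resource group is not WPAR-enclosed, clwparroot exits quietly, and WPAR_ROOT stays empty so mount points are used as-is. A reduced sketch of that path (the non-empty branch is an assumption, since this cluster never takes it):

    rgName=epprd_rg
    # Same query loadWparName issues against the ODM:
    wparName=$(clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource)
    if [[ -z $wparName ]]
    then
        WPAR_ROOT=''                        # no WPAR: mount /oracle/EPP/... directly
    else
        WPAR_ROOT=$(clwparroot $rgName)     # assumed: WPAR base path would prefix each mount
    fi
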
+epprd_rg:cl_activate_fs(1.330):/oracle/EPP/mirrlogB[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/mirrlogB from /etc/filesystems +epprd_rg:cl_activate_fs(1.330):/oracle/EPP/mirrlogB[fs_mount:119] : should match /oracle/EPP/mirrlogB from CuAt ODM and /oracle/EPP/mirrlogB from the LVCB +epprd_rg:cl_activate_fs(1.330):/oracle/EPP/mirrlogB[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.330):/oracle/EPP/mirrlogB[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.330):/oracle/EPP/mirrlogB[fs_mount:123] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:cl_activate_fs(1.330):/oracle/EPP/mirrlogB[fs_mount:128] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:cl_activate_fs(1.330):/oracle/EPP/mirrlogB[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.330):/oracle/EPP/mirrlogB[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.330):/oracle/EPP/mirrlogB[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.350):/oracle/EPP/mirrlogB[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.350):/oracle/EPP/mirrlogB[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.350):/oracle/EPP/mirrlogB[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_activate_fs(1.350):/oracle/EPP/mirrlogB[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.351):/oracle/EPP/mirrlogB[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.374):/oracle/EPP/mirrlogB[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.377):/oracle/EPP/mirrlogB[amlog_trace:319] 
DATE=2023-01-28T18:03:45.717264 +epprd_rg:cl_activate_fs(1.377):/oracle/EPP/mirrlogB[amlog_trace:320] echo '|2023-01-28T18:03:45.717264|INFO: Activating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_activate_fs(1.377):/oracle/EPP/mirrlogB[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.377):/oracle/EPP/mirrlogB[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.380):/oracle/EPP/mirrlogB[fs_mount:162] : Try to mount filesystem /oracle/EPP/mirrlogB at Jan 28 18:03:45.000 +epprd_rg:cl_activate_fs(1.380):/oracle/EPP/mirrlogB[fs_mount:163] mount /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.391):/oracle/EPP/mirrlogB[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.391):/oracle/EPP/mirrlogB[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.391):/oracle/EPP/mirrlogB[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.391):/oracle/EPP/mirrlogB[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/mirrlogB' +epprd_rg:cl_activate_fs(1.391):/oracle/EPP/mirrlogB[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.392):/oracle/EPP/mirrlogB[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.416):/oracle/EPP/mirrlogB[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.419):/oracle/EPP/mirrlogB[amlog_trace:319] DATE=2023-01-28T18:03:45.758923 +epprd_rg:cl_activate_fs(1.419):/oracle/EPP/mirrlogB[amlog_trace:320] echo '|2023-01-28T18:03:45.758923|INFO: Activating Filesystems completed|/oracle/EPP/mirrlogB' +epprd_rg:cl_activate_fs(1.419):/oracle/EPP/mirrlogB[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.419):/oracle/EPP/mirrlogB[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.419):/oracle/EPP/mirrlogB[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.419):/oracle/EPP/mirrlogB[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.419):/oracle/EPP/mirrlogB[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.419):/oracle/EPP/mirrlogB[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.419):/oracle/EPP/mirrlogB[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.419):/oracle/EPP/mirrlogB[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.419):/oracle/EPP/mirrlogB[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.419):/oracle/EPP/mirrlogB[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.419):/oracle/EPP/mirrlogB[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.420):/oracle/EPP/mirrlogB[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.421):/oracle/EPP/mirrlogB[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.422):/oracle/EPP/mirrlogB[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.422):/oracle/EPP/mirrlogB[fs_mount:236] IFS=. 
+epprd_rg:cl_activate_fs(1.423):/oracle/EPP/mirrlogB[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/mirrlogB[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/mirrlogB[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/mirrlogB[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/mirrlogB[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/mirrlogB[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.305):/oracle/EPP/mirrlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.15 \n\t lvname = mirrlogBlv \n\t label = /oracle/EPP/mirrlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:50 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/mirrlogB[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[activate_fs_process_group:540] fs_mount /oracle/EPP/oraarch fsck epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:69] FS=/oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:86] lsfs -c /oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.424):/oracle/EPP/oraarch[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.429):/oracle/EPP/oraarch[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.424):/oracle/EPP/oraarch[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/oraarch:/dev/oraarchlv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.429):/oracle/EPP/oraarch[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.429):/oracle/EPP/oraarch[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.430):/oracle/EPP/oraarch[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.424):/oracle/EPP/oraarch[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/oraarch:/dev/oraarchlv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.431):/oracle/EPP/oraarch[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.432):/oracle/EPP/oraarch[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.432):/oracle/EPP/oraarch[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.433):/oracle/EPP/oraarch[fs_mount:100] LV_name=oraarchlv +epprd_rg:cl_activate_fs(1.433):/oracle/EPP/oraarch[fs_mount:101] getlvcb -T -A oraarchlv +epprd_rg:cl_activate_fs(1.434):/oracle/EPP/oraarch[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.452):/oracle/EPP/oraarch[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.434):/oracle/EPP/oraarch[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.6 \n\t lvname = oraarchlv \n\t label = /oracle/EPP/oraarch \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.452):/oracle/EPP/oraarch[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.452):/oracle/EPP/oraarch[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.454):/oracle/EPP/oraarch[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.454):/oracle/EPP/oraarch[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.434):/oracle/EPP/oraarch[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.6 \n\t lvname = oraarchlv \n\t label = /oracle/EPP/oraarch \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.455):/oracle/EPP/oraarch[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.455):/oracle/EPP/oraarch[fs_mount:115] clodmget -q 'name = oraarchlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.459):/oracle/EPP/oraarch[fs_mount:115] CuAt_label=/oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.459):/oracle/EPP/oraarch[fs_mount:118] : 
At this point, if things are working correctly, /oracle/EPP/oraarch from /etc/filesystems +epprd_rg:cl_activate_fs(1.459):/oracle/EPP/oraarch[fs_mount:119] : should match /oracle/EPP/oraarch from CuAt ODM and /oracle/EPP/oraarch from the LVCB +epprd_rg:cl_activate_fs(1.459):/oracle/EPP/oraarch[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.459):/oracle/EPP/oraarch[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.459):/oracle/EPP/oraarch[fs_mount:123] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:cl_activate_fs(1.459):/oracle/EPP/oraarch[fs_mount:128] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:cl_activate_fs(1.459):/oracle/EPP/oraarch[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.459):/oracle/EPP/oraarch[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.459):/oracle/EPP/oraarch[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.479):/oracle/EPP/oraarch[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.479):/oracle/EPP/oraarch[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.479):/oracle/EPP/oraarch[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_activate_fs(1.479):/oracle/EPP/oraarch[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.480):/oracle/EPP/oraarch[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.503):/oracle/EPP/oraarch[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.506):/oracle/EPP/oraarch[amlog_trace:319] DATE=2023-01-28T18:03:45.846329 
+epprd_rg:cl_activate_fs(1.506):/oracle/EPP/oraarch[amlog_trace:320] echo '|2023-01-28T18:03:45.846329|INFO: Activating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_activate_fs(1.506):/oracle/EPP/oraarch[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.506):/oracle/EPP/oraarch[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.509):/oracle/EPP/oraarch[fs_mount:162] : Try to mount filesystem /oracle/EPP/oraarch at Jan 28 18:03:45.000 +epprd_rg:cl_activate_fs(1.509):/oracle/EPP/oraarch[fs_mount:163] mount /oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.520):/oracle/EPP/oraarch[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.520):/oracle/EPP/oraarch[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.520):/oracle/EPP/oraarch[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.520):/oracle/EPP/oraarch[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/oraarch' +epprd_rg:cl_activate_fs(1.520):/oracle/EPP/oraarch[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.521):/oracle/EPP/oraarch[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.545):/oracle/EPP/oraarch[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.548):/oracle/EPP/oraarch[amlog_trace:319] DATE=2023-01-28T18:03:45.888187 +epprd_rg:cl_activate_fs(1.548):/oracle/EPP/oraarch[amlog_trace:320] echo '|2023-01-28T18:03:45.888187|INFO: Activating Filesystems completed|/oracle/EPP/oraarch' +epprd_rg:cl_activate_fs(1.548):/oracle/EPP/oraarch[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.548):/oracle/EPP/oraarch[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.548):/oracle/EPP/oraarch[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.548):/oracle/EPP/oraarch[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.548):/oracle/EPP/oraarch[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.548):/oracle/EPP/oraarch[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.548):/oracle/EPP/oraarch[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.548):/oracle/EPP/oraarch[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.548):/oracle/EPP/oraarch[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.548):/oracle/EPP/oraarch[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.548):/oracle/EPP/oraarch[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.549):/oracle/EPP/oraarch[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.550):/oracle/EPP/oraarch[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/oraarch[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/oraarch[fs_mount:236] IFS=. 
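
The read just traced splits the bos.rte.filesystem level (here 7.2.5.102) into V, R, M and F, and the earlier typeset -Z declarations zero-pad R, M and F so the four fields concatenate into one fixed-width integer (7 + 02 + 005 + 102 = 702005102) that a single arithmetic test can compare. A standalone sketch of the same technique:

    typeset -li V R M F                 # integer fields
    typeset -Z2 R                       # release padded to 2 digits
    typeset -Z3 M F                     # modification and fix padded to 3 digits
    typeset -li VRMF=0

    lslpp -lcqOr bos.rte.filesystem | cut -f3 -d: | IFS=. read V R M F
    VRMF=$V$R$M$F                       # 7.2.5.102 -> 702005102

    # Mountguard needs AIX 6.1 TL7 or 7.1 TL1 and later, per the traced tests.
    if (( V == 6 && VRMF >= 601007000 )) || (( V == 7 && VRMF >= 701001000 ))
    then
        : mountguard supported on this level
    fi
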
+epprd_rg:cl_activate_fs(1.552):/oracle/EPP/oraarch[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/oraarch[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/oraarch[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/oraarch[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/oraarch[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/oraarch[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.434):/oracle/EPP/oraarch[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.6 \n\t lvname = oraarchlv \n\t label = /oracle/EPP/oraarch \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/oraarch[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/origlogA +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[activate_fs_process_group:540] fs_mount /oracle/EPP/origlogA fsck epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:69] FS=/oracle/EPP/origlogA +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/origlogA +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:86] lsfs -c /oracle/EPP/origlogA +epprd_rg:cl_activate_fs(1.553):/oracle/EPP/origlogA[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.558):/oracle/EPP/origlogA[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.554):/oracle/EPP/origlogA[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogA:/dev/origlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.558):/oracle/EPP/origlogA[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.558):/oracle/EPP/origlogA[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.559):/oracle/EPP/origlogA[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.554):/oracle/EPP/origlogA[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogA:/dev/origlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.560):/oracle/EPP/origlogA[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.561):/oracle/EPP/origlogA[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.561):/oracle/EPP/origlogA[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.562):/oracle/EPP/origlogA[fs_mount:100] LV_name=origlogAlv +epprd_rg:cl_activate_fs(1.562):/oracle/EPP/origlogA[fs_mount:101] getlvcb -T -A origlogAlv +epprd_rg:cl_activate_fs(1.563):/oracle/EPP/origlogA[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.581):/oracle/EPP/origlogA[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.563):/oracle/EPP/origlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.12 \n\t lvname = origlogAlv \n\t label = /oracle/EPP/origlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:48 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.581):/oracle/EPP/origlogA[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.581):/oracle/EPP/origlogA[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.563):/oracle/EPP/origlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.12 \n\t lvname = origlogAlv \n\t label = /oracle/EPP/origlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:48 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.583):/oracle/EPP/origlogA[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.584):/oracle/EPP/origlogA[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.585):/oracle/EPP/origlogA[fs_mount:115] clodmget -q 'name = origlogAlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.588):/oracle/EPP/origlogA[fs_mount:115] CuAt_label=/oracle/EPP/origlogA 
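
activate_fs_process_group, visible at the start of every block above, checks the recovery method ("[[ sequential == parallel ]]") and in this configuration calls fs_mount in the foreground, so the filesystems are mounted one at a time. A minimal sketch of that dispatch; RECOVERY_METHOD, the fsck tool argument, and the temp-file name come from the trace, while the backgrounding and wait in the parallel branch are assumptions about the path not taken here:

    RECOVERY_METHOD=sequential                     # from the resource group definition
    TMP_FILENAME=epprd_rg_activate_fs.tmp27918684  # shared result file from the trace

    for fs in /oracle /oracle/EPP /oracle/EPP/mirrlogA    # abbreviated list
    do
        if [[ $RECOVERY_METHOD == parallel ]]
        then
            fs_mount $fs fsck $TMP_FILENAME &      # assumed: background for parallel recovery
        else
            fs_mount $fs fsck $TMP_FILENAME        # traced: foreground, serial recovery
        fi
    done
    [[ $RECOVERY_METHOD == parallel ]] && wait     # assumed collection point
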
+epprd_rg:cl_activate_fs(1.588):/oracle/EPP/origlogA[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/origlogA from /etc/filesystems +epprd_rg:cl_activate_fs(1.588):/oracle/EPP/origlogA[fs_mount:119] : should match /oracle/EPP/origlogA from CuAt ODM and /oracle/EPP/origlogA from the LVCB +epprd_rg:cl_activate_fs(1.588):/oracle/EPP/origlogA[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.588):/oracle/EPP/origlogA[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.588):/oracle/EPP/origlogA[fs_mount:123] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:cl_activate_fs(1.589):/oracle/EPP/origlogA[fs_mount:128] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:cl_activate_fs(1.589):/oracle/EPP/origlogA[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.589):/oracle/EPP/origlogA[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.589):/oracle/EPP/origlogA[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.608):/oracle/EPP/origlogA[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.608):/oracle/EPP/origlogA[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.608):/oracle/EPP/origlogA[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_activate_fs(1.608):/oracle/EPP/origlogA[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.609):/oracle/EPP/origlogA[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.633):/oracle/EPP/origlogA[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.636):/oracle/EPP/origlogA[amlog_trace:319] 
DATE=2023-01-28T18:03:45.976012 +epprd_rg:cl_activate_fs(1.636):/oracle/EPP/origlogA[amlog_trace:320] echo '|2023-01-28T18:03:45.976012|INFO: Activating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_activate_fs(1.636):/oracle/EPP/origlogA[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.636):/oracle/EPP/origlogA[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.638):/oracle/EPP/origlogA[fs_mount:162] : Try to mount filesystem /oracle/EPP/origlogA at Jan 28 18:03:45.000 +epprd_rg:cl_activate_fs(1.638):/oracle/EPP/origlogA[fs_mount:163] mount /oracle/EPP/origlogA +epprd_rg:cl_activate_fs(1.650):/oracle/EPP/origlogA[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.650):/oracle/EPP/origlogA[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.650):/oracle/EPP/origlogA[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.650):/oracle/EPP/origlogA[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/origlogA' +epprd_rg:cl_activate_fs(1.650):/oracle/EPP/origlogA[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.651):/oracle/EPP/origlogA[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.674):/oracle/EPP/origlogA[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.677):/oracle/EPP/origlogA[amlog_trace:319] DATE=2023-01-28T18:03:46.017404 +epprd_rg:cl_activate_fs(1.677):/oracle/EPP/origlogA[amlog_trace:320] echo '|2023-01-28T18:03:46.017404|INFO: Activating Filesystems completed|/oracle/EPP/origlogA' +epprd_rg:cl_activate_fs(1.677):/oracle/EPP/origlogA[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.677):/oracle/EPP/origlogA[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.677):/oracle/EPP/origlogA[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.677):/oracle/EPP/origlogA[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.677):/oracle/EPP/origlogA[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.677):/oracle/EPP/origlogA[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.677):/oracle/EPP/origlogA[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.677):/oracle/EPP/origlogA[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.677):/oracle/EPP/origlogA[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.677):/oracle/EPP/origlogA[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.677):/oracle/EPP/origlogA[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.678):/oracle/EPP/origlogA[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.679):/oracle/EPP/origlogA[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogA[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogA[fs_mount:236] IFS=. 
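[Annotation] The typeset -Z2/-Z3 declarations just traced zero-fill R, M and F so that concatenating the four fields of the bos.rte.filesystem level yields a fixed-width integer: 7.2.5.102 becomes 702005102, which is exactly the VRMF assignment on the next trace line. That turns the capability tests at fs_mount:240-241 into plain integer comparisons against 601007000 (AIX 6.1 TL7) and 701001000 (AIX 7.1 TL1), the levels the script treats as the floor for mountguard. A standalone sketch of the same padding:

    #!/bin/ksh93
    # Sketch: build a comparable integer from a dotted fileset level.
    typeset -li V VRMF
    typeset -Z2 R            # release zero-filled to 2 digits
    typeset -Z3 M F          # mod and fix levels zero-filled to 3 digits

    level=$(lslpp -lcqOr bos.rte.filesystem | cut -f3 -d:)
    print -- "$level" | IFS=. read V R M F
    VRMF=$V$R$M$F            # e.g. 7.2.5.102 -> 702005102

    # Same thresholds fs_mount tests: 6.1.7.0 and 7.1.1.0
    if (( V == 6 && VRMF >= 601007000 )) || (( V == 7 && VRMF >= 701001000 ))
    then
        print "bos.rte.filesystem $level supports mountguard (VRMF=$VRMF)"
    fi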
+epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogA[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogA[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogA[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogA[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogA[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogA[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.563):/oracle/EPP/origlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.12 \n\t lvname = origlogAlv \n\t label = /oracle/EPP/origlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:48 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogA[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogB[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/origlogB +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogB[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogB[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogB[activate_fs_process_group:540] fs_mount /oracle/EPP/origlogB fsck epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogB[fs_mount:69] FS=/oracle/EPP/origlogB +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogB[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogB[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogB[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogB[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogB[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogB[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogB[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogB[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogB[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogB[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.682):/oracle/EPP/origlogB[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.682):/oracle/EPP/origlogB[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.682):/oracle/EPP/origlogB[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.682):/oracle/EPP/origlogB[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.682):/oracle/EPP/origlogB[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.682):/oracle/EPP/origlogB[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.682):/oracle/EPP/origlogB[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/origlogB +epprd_rg:cl_activate_fs(1.682):/oracle/EPP/origlogB[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.682):/oracle/EPP/origlogB[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.682):/oracle/EPP/origlogB[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.682):/oracle/EPP/origlogB[fs_mount:86] lsfs -c /oracle/EPP/origlogB +epprd_rg:cl_activate_fs(1.683):/oracle/EPP/origlogB[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.687):/oracle/EPP/origlogB[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.683):/oracle/EPP/origlogB[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogB:/dev/origlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.687):/oracle/EPP/origlogB[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.687):/oracle/EPP/origlogB[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.688):/oracle/EPP/origlogB[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.683):/oracle/EPP/origlogB[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogB:/dev/origlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.689):/oracle/EPP/origlogB[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.690):/oracle/EPP/origlogB[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.690):/oracle/EPP/origlogB[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.691):/oracle/EPP/origlogB[fs_mount:100] LV_name=origlogBlv +epprd_rg:cl_activate_fs(1.691):/oracle/EPP/origlogB[fs_mount:101] getlvcb -T -A origlogBlv +epprd_rg:cl_activate_fs(1.692):/oracle/EPP/origlogB[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.710):/oracle/EPP/origlogB[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.692):/oracle/EPP/origlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.13 \n\t lvname = origlogBlv \n\t label = /oracle/EPP/origlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.710):/oracle/EPP/origlogB[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.710):/oracle/EPP/origlogB[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.711):/oracle/EPP/origlogB[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.692):/oracle/EPP/origlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.13 \n\t lvname = origlogBlv \n\t label = /oracle/EPP/origlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.712):/oracle/EPP/origlogB[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.713):/oracle/EPP/origlogB[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.714):/oracle/EPP/origlogB[fs_mount:115] clodmget -q 'name = origlogBlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogB[fs_mount:115] CuAt_label=/oracle/EPP/origlogB 
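[Annotation] clodmget, used again at fs_mount:115 just above, is PowerHA's query wrapper around the ODM: -q gives the selection criteria, -f the field to print, and -n keeps the output to bare values (flag usage as seen throughout this log). The same record can be pulled with base-AIX odmget, which prints the whole stanza instead:

    # PowerHA wrapper: prints just the value field
    clodmget -q 'name = origlogBlv and attribute = label' -f value -n CuAt
    # -> /oracle/EPP/origlogB

    # Base AIX equivalent: full CuAt stanza for the same query
    odmget -q "name = origlogBlv and attribute = label" CuAt
    # CuAt:
    #         name = "origlogBlv"
    #         attribute = "label"
    #         value = "/oracle/EPP/origlogB"
    #         ...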
+epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogB[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/origlogB from /etc/filesystems +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogB[fs_mount:119] : should match /oracle/EPP/origlogB from CuAt ODM and /oracle/EPP/origlogB from the LVCB +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogB[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogB[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogB[fs_mount:123] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogB[fs_mount:128] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogB[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogB[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogB[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.738):/oracle/EPP/origlogB[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.738):/oracle/EPP/origlogB[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.738):/oracle/EPP/origlogB[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_activate_fs(1.738):/oracle/EPP/origlogB[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.739):/oracle/EPP/origlogB[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.763):/oracle/EPP/origlogB[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.766):/oracle/EPP/origlogB[amlog_trace:319] 
DATE=2023-01-28T18:03:46.105842 +epprd_rg:cl_activate_fs(1.766):/oracle/EPP/origlogB[amlog_trace:320] echo '|2023-01-28T18:03:46.105842|INFO: Activating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_activate_fs(1.766):/oracle/EPP/origlogB[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.766):/oracle/EPP/origlogB[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.768):/oracle/EPP/origlogB[fs_mount:162] : Try to mount filesystem /oracle/EPP/origlogB at Jan 28 18:03:46.000 +epprd_rg:cl_activate_fs(1.768):/oracle/EPP/origlogB[fs_mount:163] mount /oracle/EPP/origlogB +epprd_rg:cl_activate_fs(1.779):/oracle/EPP/origlogB[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/origlogB[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/origlogB[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/origlogB[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/origlogB' +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/origlogB[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/origlogB[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.804):/oracle/EPP/origlogB[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.807):/oracle/EPP/origlogB[amlog_trace:319] DATE=2023-01-28T18:03:46.147450 +epprd_rg:cl_activate_fs(1.807):/oracle/EPP/origlogB[amlog_trace:320] echo '|2023-01-28T18:03:46.147450|INFO: Activating Filesystems completed|/oracle/EPP/origlogB' +epprd_rg:cl_activate_fs(1.807):/oracle/EPP/origlogB[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.807):/oracle/EPP/origlogB[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.807):/oracle/EPP/origlogB[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.807):/oracle/EPP/origlogB[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.807):/oracle/EPP/origlogB[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.807):/oracle/EPP/origlogB[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.807):/oracle/EPP/origlogB[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.807):/oracle/EPP/origlogB[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.807):/oracle/EPP/origlogB[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.807):/oracle/EPP/origlogB[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.807):/oracle/EPP/origlogB[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.808):/oracle/EPP/origlogB[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.809):/oracle/EPP/origlogB[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.811):/oracle/EPP/origlogB[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.811):/oracle/EPP/origlogB[fs_mount:236] IFS=. 
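[Annotation] Before every mount, fs_mount:143 runs clwparroot epprd_rg to find out whether the resource group lives in a WPAR. As the repeated clwparroot traces above show, loadWparName simply asks the HACMPresource ODM class for a WPAR_NAME value; the empty result means this group runs in the global environment, so WPAR_ROOT stays empty and the filesystems mount at their normal paths. A sketch of that decision (rg_wpar_root is an illustrative name, and /wpars/<name> is only the conventional default WPAR base directory):

    #!/bin/ksh93
    # Sketch: decide whether an RG's filesystems mount under a WPAR root.
    rg_wpar_root()
    {
        typeset wparName

        # No WPAR support installed -> nothing to prefix
        lslpp -l bos.wpars >/dev/null 2>&1 || return 0

        # The RG is WPAR-enabled only if HACMPresource carries WPAR_NAME
        wparName=$(clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource)
        [[ -z $wparName ]] && return 0      # global environment: print nothing

        print "/wpars/$wparName"            # conventional default WPAR root
    }

    WPAR_ROOT=$(rg_wpar_root)
    [[ -n $WPAR_ROOT ]] && print "would mount under $WPAR_ROOT"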
+epprd_rg:cl_activate_fs(1.811):/oracle/EPP/origlogB[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.811):/oracle/EPP/origlogB[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.811):/oracle/EPP/origlogB[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.811):/oracle/EPP/origlogB[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.811):/oracle/EPP/origlogB[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.811):/oracle/EPP/origlogB[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.692):/oracle/EPP/origlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.13 \n\t lvname = origlogBlv \n\t label = /oracle/EPP/origlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.811):/oracle/EPP/origlogB[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.811):/oracle/EPP/sapdata1[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(1.811):/oracle/EPP/sapdata1[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.811):/oracle/EPP/sapdata1[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.811):/oracle/EPP/sapdata1[activate_fs_process_group:540] fs_mount /oracle/EPP/sapdata1 fsck epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(1.811):/oracle/EPP/sapdata1[fs_mount:69] FS=/oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(1.811):/oracle/EPP/sapdata1[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.811):/oracle/EPP/sapdata1[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.811):/oracle/EPP/sapdata1[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.812):/oracle/EPP/sapdata1[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(1.812):/oracle/EPP/sapdata1[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.812):/oracle/EPP/sapdata1[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.812):/oracle/EPP/sapdata1[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.812):/oracle/EPP/sapdata1[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.812):/oracle/EPP/sapdata1[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.812):/oracle/EPP/sapdata1[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.812):/oracle/EPP/sapdata1[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.812):/oracle/EPP/sapdata1[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.812):/oracle/EPP/sapdata1[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.812):/oracle/EPP/sapdata1[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.812):/oracle/EPP/sapdata1[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.812):/oracle/EPP/sapdata1[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.812):/oracle/EPP/sapdata1[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(1.812):/oracle/EPP/sapdata1[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.812):/oracle/EPP/sapdata1[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.812):/oracle/EPP/sapdata1[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.812):/oracle/EPP/sapdata1[fs_mount:86] lsfs -c /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(1.813):/oracle/EPP/sapdata1[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.818):/oracle/EPP/sapdata1[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.813):/oracle/EPP/sapdata1[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata1:/dev/sapdata1lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.818):/oracle/EPP/sapdata1[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.818):/oracle/EPP/sapdata1[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.819):/oracle/EPP/sapdata1[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.813):/oracle/EPP/sapdata1[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata1:/dev/sapdata1lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.819):/oracle/EPP/sapdata1[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.820):/oracle/EPP/sapdata1[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.821):/oracle/EPP/sapdata1[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.822):/oracle/EPP/sapdata1[fs_mount:100] LV_name=sapdata1lv +epprd_rg:cl_activate_fs(1.822):/oracle/EPP/sapdata1[fs_mount:101] getlvcb -T -A sapdata1lv +epprd_rg:cl_activate_fs(1.823):/oracle/EPP/sapdata1[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.840):/oracle/EPP/sapdata1[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.823):/oracle/EPP/sapdata1[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.7 \n\t lvname = sapdata1lv \n\t label = /oracle/EPP/sapdata1 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:44 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(1.840):/oracle/EPP/sapdata1[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.840):/oracle/EPP/sapdata1[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.841):/oracle/EPP/sapdata1[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.823):/oracle/EPP/sapdata1[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.7 \n\t lvname = sapdata1lv \n\t label = /oracle/EPP/sapdata1 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:44 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(1.842):/oracle/EPP/sapdata1[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.843):/oracle/EPP/sapdata1[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.844):/oracle/EPP/sapdata1[fs_mount:115] clodmget -q 'name = sapdata1lv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/sapdata1[fs_mount:115] CuAt_label=/oracle/EPP/sapdata1 
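[Annotation] Each filesystem enters through the same activate_fs_process_group loop: the [[ sequential == parallel ]] test at :528 shows the recovery method for this group resolved to sequential, so :538-540 calls fs_mount in the foreground, one filesystem at a time, with the shared temp file epprd_rg_activate_fs.tmp27918684 passed along to collect results. In parallel mode the calls would presumably be backgrounded instead. A sketch of that dispatch (the filesystem list here is abbreviated; fs_mount is assumed defined as in the traces above):

    #!/bin/ksh93
    # Sketch: serial vs parallel dispatch of fs_mount, as in
    # activate_fs_process_group.
    RECOVERY_METHOD=sequential
    TMP_FILE=epprd_rg_activate_fs.tmp27918684

    for fs in /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3
    do
        if [[ $RECOVERY_METHOD == parallel ]]
        then
            fs_mount $fs fsck $TMP_FILE &      # concurrent recovery
        else
            fs_mount $fs fsck $TMP_FILE        # foreground, serial recovery
        fi
    done
    wait                                       # reap any backgrounded mounts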
+epprd_rg:cl_activate_fs(1.848):/oracle/EPP/sapdata1[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/sapdata1 from /etc/filesystems +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/sapdata1[fs_mount:119] : should match /oracle/EPP/sapdata1 from CuAt ODM and /oracle/EPP/sapdata1 from the LVCB +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/sapdata1[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/sapdata1[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/sapdata1[fs_mount:123] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/sapdata1[fs_mount:128] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/sapdata1[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/sapdata1[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/sapdata1[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.868):/oracle/EPP/sapdata1[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.868):/oracle/EPP/sapdata1[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.868):/oracle/EPP/sapdata1[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_activate_fs(1.868):/oracle/EPP/sapdata1[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.869):/oracle/EPP/sapdata1[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.893):/oracle/EPP/sapdata1[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.895):/oracle/EPP/sapdata1[amlog_trace:319] 
DATE=2023-01-28T18:03:46.235703 +epprd_rg:cl_activate_fs(1.895):/oracle/EPP/sapdata1[amlog_trace:320] echo '|2023-01-28T18:03:46.235703|INFO: Activating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_activate_fs(1.895):/oracle/EPP/sapdata1[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.896):/oracle/EPP/sapdata1[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.898):/oracle/EPP/sapdata1[fs_mount:162] : Try to mount filesystem /oracle/EPP/sapdata1 at Jan 28 18:03:46.000 +epprd_rg:cl_activate_fs(1.898):/oracle/EPP/sapdata1[fs_mount:163] mount /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(1.910):/oracle/EPP/sapdata1[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.910):/oracle/EPP/sapdata1[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.910):/oracle/EPP/sapdata1[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.910):/oracle/EPP/sapdata1[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/sapdata1' +epprd_rg:cl_activate_fs(1.910):/oracle/EPP/sapdata1[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.911):/oracle/EPP/sapdata1[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.935):/oracle/EPP/sapdata1[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.938):/oracle/EPP/sapdata1[amlog_trace:319] DATE=2023-01-28T18:03:46.278005 +epprd_rg:cl_activate_fs(1.938):/oracle/EPP/sapdata1[amlog_trace:320] echo '|2023-01-28T18:03:46.278005|INFO: Activating Filesystems completed|/oracle/EPP/sapdata1' +epprd_rg:cl_activate_fs(1.938):/oracle/EPP/sapdata1[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.938):/oracle/EPP/sapdata1[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.938):/oracle/EPP/sapdata1[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.938):/oracle/EPP/sapdata1[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.938):/oracle/EPP/sapdata1[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.938):/oracle/EPP/sapdata1[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.938):/oracle/EPP/sapdata1[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.938):/oracle/EPP/sapdata1[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.938):/oracle/EPP/sapdata1[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.938):/oracle/EPP/sapdata1[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.938):/oracle/EPP/sapdata1[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/sapdata1[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.940):/oracle/EPP/sapdata1[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.941):/oracle/EPP/sapdata1[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata1[fs_mount:236] IFS=. 
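[Annotation] The clavailability.log entries just above show the full amlog_trace pattern: clcycle rotates the log if needed, cltime supplies a microsecond ISO timestamp, and the record appended to /var/hacmp/availability/clavailability.log has the shape |<timestamp>|INFO: <event>|<object>. Each mount is bracketed by an 'Activating Filesystem' and an 'Activating Filesystems completed' pair, which is what makes per-filesystem activation timing measurable from that log. A sketch of the same pattern (am_trace is an illustrative stand-in for amlog_trace):

    #!/bin/ksh93
    # Sketch: the begin/end availability records amlog_trace writes.
    AMLOG=/var/hacmp/availability/clavailability.log

    am_trace()
    {
        typeset DATE
        clcycle clavailability.log >/dev/null 2>&1    # rotate when due
        DATE=$(cltime)                                # 2023-01-28T18:03:46.235703
        echo "|$DATE|INFO: $1" >> $AMLOG
    }

    am_trace 'Activating Filesystem|/oracle/EPP/sapdata1'
    mount /oracle/EPP/sapdata1
    am_trace 'Activating Filesystems completed|/oracle/EPP/sapdata1'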
+epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata1[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata1[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata1[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata1[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata1[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata1[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.823):/oracle/EPP/sapdata1[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.7 \n\t lvname = sapdata1lv \n\t label = /oracle/EPP/sapdata1 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:44 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata1[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[activate_fs_process_group:540] fs_mount /oracle/EPP/sapdata2 fsck epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:69] FS=/oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:86] lsfs -c /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(1.943):/oracle/EPP/sapdata2[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.948):/oracle/EPP/sapdata2[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.943):/oracle/EPP/sapdata2[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata2:/dev/sapdata2lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.948):/oracle/EPP/sapdata2[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.948):/oracle/EPP/sapdata2[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.949):/oracle/EPP/sapdata2[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.943):/oracle/EPP/sapdata2[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata2:/dev/sapdata2lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.950):/oracle/EPP/sapdata2[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.951):/oracle/EPP/sapdata2[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.951):/oracle/EPP/sapdata2[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.952):/oracle/EPP/sapdata2[fs_mount:100] LV_name=sapdata2lv +epprd_rg:cl_activate_fs(1.952):/oracle/EPP/sapdata2[fs_mount:101] getlvcb -T -A sapdata2lv +epprd_rg:cl_activate_fs(1.953):/oracle/EPP/sapdata2[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.971):/oracle/EPP/sapdata2[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.953):/oracle/EPP/sapdata2[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.8 \n\t lvname = sapdata2lv \n\t label = /oracle/EPP/sapdata2 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:45 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(1.971):/oracle/EPP/sapdata2[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.971):/oracle/EPP/sapdata2[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.972):/oracle/EPP/sapdata2[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.953):/oracle/EPP/sapdata2[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.8 \n\t lvname = sapdata2lv \n\t label = /oracle/EPP/sapdata2 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:45 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(1.973):/oracle/EPP/sapdata2[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.974):/oracle/EPP/sapdata2[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.975):/oracle/EPP/sapdata2[fs_mount:115] clodmget -q 'name = sapdata2lv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.979):/oracle/EPP/sapdata2[fs_mount:115] CuAt_label=/oracle/EPP/sapdata2 
+epprd_rg:cl_activate_fs(1.979):/oracle/EPP/sapdata2[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/sapdata2 from /etc/filesystems +epprd_rg:cl_activate_fs(1.979):/oracle/EPP/sapdata2[fs_mount:119] : should match /oracle/EPP/sapdata2 from CuAt ODM and /oracle/EPP/sapdata2 from the LVCB +epprd_rg:cl_activate_fs(1.979):/oracle/EPP/sapdata2[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.979):/oracle/EPP/sapdata2[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.979):/oracle/EPP/sapdata2[fs_mount:123] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:cl_activate_fs(1.979):/oracle/EPP/sapdata2[fs_mount:128] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:cl_activate_fs(1.979):/oracle/EPP/sapdata2[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.979):/oracle/EPP/sapdata2[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.979):/oracle/EPP/sapdata2[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.999):/oracle/EPP/sapdata2[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.999):/oracle/EPP/sapdata2[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.999):/oracle/EPP/sapdata2[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_activate_fs(1.999):/oracle/EPP/sapdata2[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.000):/oracle/EPP/sapdata2[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.024):/oracle/EPP/sapdata2[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.026):/oracle/EPP/sapdata2[amlog_trace:319] 
DATE=2023-01-28T18:03:46.366680 +epprd_rg:cl_activate_fs(2.026):/oracle/EPP/sapdata2[amlog_trace:320] echo '|2023-01-28T18:03:46.366680|INFO: Activating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_activate_fs(2.026):/oracle/EPP/sapdata2[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.027):/oracle/EPP/sapdata2[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.029):/oracle/EPP/sapdata2[fs_mount:162] : Try to mount filesystem /oracle/EPP/sapdata2 at Jan 28 18:03:46.000 +epprd_rg:cl_activate_fs(2.029):/oracle/EPP/sapdata2[fs_mount:163] mount /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(2.041):/oracle/EPP/sapdata2[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.041):/oracle/EPP/sapdata2[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.041):/oracle/EPP/sapdata2[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(2.041):/oracle/EPP/sapdata2[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/sapdata2' +epprd_rg:cl_activate_fs(2.041):/oracle/EPP/sapdata2[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.042):/oracle/EPP/sapdata2[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.066):/oracle/EPP/sapdata2[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/sapdata2[amlog_trace:319] DATE=2023-01-28T18:03:46.409210 +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/sapdata2[amlog_trace:320] echo '|2023-01-28T18:03:46.409210|INFO: Activating Filesystems completed|/oracle/EPP/sapdata2' +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/sapdata2[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/sapdata2[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/sapdata2[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/sapdata2[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/sapdata2[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/sapdata2[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/sapdata2[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/sapdata2[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/sapdata2[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/sapdata2[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.069):/oracle/EPP/sapdata2[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.070):/oracle/EPP/sapdata2[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.071):/oracle/EPP/sapdata2[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata2[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata2[fs_mount:236] IFS=. 
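[Annotation] Every mount in this log succeeds, so the test at fs_mount:209 never takes the failure branch and the fsck named in the TOOL argument is never exercised. The shape that argument implies is a check-and-retry on the underlying device, sketched below; this is a hypothetical reconstruction, since the real branch never runs here, and the fsck options are an assumption:

    #!/bin/ksh93
    # Hypothetical failure path: recover with fsck, then retry the mount.
    # The successful mounts in this log never reach this code.
    mount_with_recovery()
    {
        typeset FS=$1
        mount $FS && return 0

        # Assumption: fix the device non-interactively, then try again
        typeset dev=$(lsfs -c $FS 2>/dev/null | tail -1 | cut -f2 -d:)
        fsck -y $dev
        mount $FS
    }

    mount_with_recovery /oracle/EPP/sapdata2 ||
        print -u2 "mount failed: /oracle/EPP/sapdata2"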
+epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata2[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata2[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata2[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata2[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata2[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata2[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.953):/oracle/EPP/sapdata2[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.8 \n\t lvname = sapdata2lv \n\t label = /oracle/EPP/sapdata2 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:45 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata2[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[activate_fs_process_group:540] fs_mount /oracle/EPP/sapdata3 fsck epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:69] FS=/oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:86] lsfs -c /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(2.074):/oracle/EPP/sapdata3[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(2.079):/oracle/EPP/sapdata3[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(2.075):/oracle/EPP/sapdata3[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata3:/dev/sapdata3lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.079):/oracle/EPP/sapdata3[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(2.079):/oracle/EPP/sapdata3[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.080):/oracle/EPP/sapdata3[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(2.075):/oracle/EPP/sapdata3[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata3:/dev/sapdata3lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.081):/oracle/EPP/sapdata3[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(2.082):/oracle/EPP/sapdata3[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(2.082):/oracle/EPP/sapdata3[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(2.083):/oracle/EPP/sapdata3[fs_mount:100] LV_name=sapdata3lv +epprd_rg:cl_activate_fs(2.083):/oracle/EPP/sapdata3[fs_mount:101] getlvcb -T -A sapdata3lv +epprd_rg:cl_activate_fs(2.084):/oracle/EPP/sapdata3[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.102):/oracle/EPP/sapdata3[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(2.084):/oracle/EPP/sapdata3[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.9 \n\t lvname = sapdata3lv \n\t label = /oracle/EPP/sapdata3 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(2.102):/oracle/EPP/sapdata3[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.102):/oracle/EPP/sapdata3[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.103):/oracle/EPP/sapdata3[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(2.084):/oracle/EPP/sapdata3[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.9 \n\t lvname = sapdata3lv \n\t label = /oracle/EPP/sapdata3 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(2.104):/oracle/EPP/sapdata3[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.105):/oracle/EPP/sapdata3[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(2.106):/oracle/EPP/sapdata3[fs_mount:115] clodmget -q 'name = sapdata3lv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(2.109):/oracle/EPP/sapdata3[fs_mount:115] CuAt_label=/oracle/EPP/sapdata3 
+epprd_rg:cl_activate_fs(2.110):/oracle/EPP/sapdata3[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/sapdata3 from /etc/filesystems +epprd_rg:cl_activate_fs(2.110):/oracle/EPP/sapdata3[fs_mount:119] : should match /oracle/EPP/sapdata3 from CuAt ODM and /oracle/EPP/sapdata3 from the LVCB +epprd_rg:cl_activate_fs(2.110):/oracle/EPP/sapdata3[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.110):/oracle/EPP/sapdata3[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(2.110):/oracle/EPP/sapdata3[fs_mount:123] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:cl_activate_fs(2.110):/oracle/EPP/sapdata3[fs_mount:128] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:cl_activate_fs(2.110):/oracle/EPP/sapdata3[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.110):/oracle/EPP/sapdata3[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.110):/oracle/EPP/sapdata3[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.129):/oracle/EPP/sapdata3[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.130):/oracle/EPP/sapdata3[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.130):/oracle/EPP/sapdata3[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_activate_fs(2.130):/oracle/EPP/sapdata3[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.130):/oracle/EPP/sapdata3[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.154):/oracle/EPP/sapdata3[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.157):/oracle/EPP/sapdata3[amlog_trace:319] 
DATE=2023-01-28T18:03:46.497268 +epprd_rg:cl_activate_fs(2.157):/oracle/EPP/sapdata3[amlog_trace:320] echo '|2023-01-28T18:03:46.497268|INFO: Activating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_activate_fs(2.157):/oracle/EPP/sapdata3[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.157):/oracle/EPP/sapdata3[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.160):/oracle/EPP/sapdata3[fs_mount:162] : Try to mount filesystem /oracle/EPP/sapdata3 at Jan 28 18:03:46.000 +epprd_rg:cl_activate_fs(2.160):/oracle/EPP/sapdata3[fs_mount:163] mount /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(2.171):/oracle/EPP/sapdata3[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.171):/oracle/EPP/sapdata3[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.171):/oracle/EPP/sapdata3[fs_mount:220] : if we are running on an AIX level that supports it +epprd_rg:cl_activate_fs(2.171):/oracle/EPP/sapdata3[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/sapdata3' +epprd_rg:cl_activate_fs(2.171):/oracle/EPP/sapdata3[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.172):/oracle/EPP/sapdata3[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.196):/oracle/EPP/sapdata3[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.198):/oracle/EPP/sapdata3[amlog_trace:319] DATE=2023-01-28T18:03:46.538698 +epprd_rg:cl_activate_fs(2.198):/oracle/EPP/sapdata3[amlog_trace:320] echo '|2023-01-28T18:03:46.538698|INFO: Activating Filesystems completed|/oracle/EPP/sapdata3' +epprd_rg:cl_activate_fs(2.198):/oracle/EPP/sapdata3[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.199):/oracle/EPP/sapdata3[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.199):/oracle/EPP/sapdata3[fs_mount:226] : Each of the V, R, M and F fields is padded to fixed length, +epprd_rg:cl_activate_fs(2.199):/oracle/EPP/sapdata3[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.199):/oracle/EPP/sapdata3[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.199):/oracle/EPP/sapdata3[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.199):/oracle/EPP/sapdata3[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.199):/oracle/EPP/sapdata3[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.199):/oracle/EPP/sapdata3[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.199):/oracle/EPP/sapdata3[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.199):/oracle/EPP/sapdata3[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.200):/oracle/EPP/sapdata3[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.201):/oracle/EPP/sapdata3[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.202):/oracle/EPP/sapdata3[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.202):/oracle/EPP/sapdata3[fs_mount:236] IFS=.
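
[Editor's note] The VRMF handling above and on the next trace line is worth spelling out: fs_mount zero-pads the release, modification and fix fields so that the whole bos.rte.filesystem level collapses into one integer that compares reliably. A minimal ksh sketch of the same technique; the variable names mirror the trace, and the thresholds are the 6.1.7.0 and 7.1.1.0 fileset levels tested just below:

    typeset -li V R M F        # integer fields of the fileset level
    typeset -Z2 R              # zero-pad release to 2 digits
    typeset -Z3 M              # zero-pad modification to 3 digits
    typeset -Z3 F              # zero-pad fix to 3 digits
    typeset -li VRMF=0
    # The level is colon-separated field 3 of lslpp -lcqOr output;
    # the trailing read runs in the current shell in ksh, so V R M F persist.
    lslpp -lcqOr bos.rte.filesystem | cut -f3 -d: | IFS=. read V R M F
    VRMF=$V$R$M$F              # e.g. 7.2.5.102 -> 702005102
    if (( V == 6 && VRMF >= 601007000 )) || (( V == 7 && VRMF >= 701001000 ))
    then
        : # this AIX level supports the JFS2 feature being gated on
    fi
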
+epprd_rg:cl_activate_fs(2.202):/oracle/EPP/sapdata3[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.202):/oracle/EPP/sapdata3[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.202):/oracle/EPP/sapdata3[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.202):/oracle/EPP/sapdata3[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.202):/oracle/EPP/sapdata3[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.202):/oracle/EPP/sapdata3[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(2.084):/oracle/EPP/sapdata3[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.9 \n\t lvname = sapdata3lv \n\t label = /oracle/EPP/sapdata3 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata3[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[activate_fs_process_group:540] fs_mount /oracle/EPP/sapdata4 fsck epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:69] FS=/oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:86] lsfs -c /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(2.204):/oracle/EPP/sapdata4[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(2.209):/oracle/EPP/sapdata4[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(2.204):/oracle/EPP/sapdata4[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata4:/dev/sapdata4lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.209):/oracle/EPP/sapdata4[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(2.209):/oracle/EPP/sapdata4[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.210):/oracle/EPP/sapdata4[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(2.204):/oracle/EPP/sapdata4[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata4:/dev/sapdata4lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.211):/oracle/EPP/sapdata4[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(2.211):/oracle/EPP/sapdata4[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(2.212):/oracle/EPP/sapdata4[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(2.213):/oracle/EPP/sapdata4[fs_mount:100] LV_name=sapdata4lv +epprd_rg:cl_activate_fs(2.213):/oracle/EPP/sapdata4[fs_mount:101] getlvcb -T -A sapdata4lv +epprd_rg:cl_activate_fs(2.214):/oracle/EPP/sapdata4[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.232):/oracle/EPP/sapdata4[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(2.214):/oracle/EPP/sapdata4[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.10 \n\t lvname = sapdata4lv \n\t label = /oracle/EPP/sapdata4 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.232):/oracle/EPP/sapdata4[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.232):/oracle/EPP/sapdata4[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.233):/oracle/EPP/sapdata4[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(2.214):/oracle/EPP/sapdata4[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.10 \n\t lvname = sapdata4lv \n\t label = /oracle/EPP/sapdata4 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.234):/oracle/EPP/sapdata4[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.235):/oracle/EPP/sapdata4[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(2.236):/oracle/EPP/sapdata4[fs_mount:115] clodmget -q 'name = sapdata4lv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(2.240):/oracle/EPP/sapdata4[fs_mount:115] CuAt_label=/oracle/EPP/sapdata4 
+epprd_rg:cl_activate_fs(2.240):/oracle/EPP/sapdata4[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/sapdata4 from /etc/filesystems +epprd_rg:cl_activate_fs(2.240):/oracle/EPP/sapdata4[fs_mount:119] : should match /oracle/EPP/sapdata4 from CuAt ODM and /oracle/EPP/sapdata4 from the LVCB +epprd_rg:cl_activate_fs(2.240):/oracle/EPP/sapdata4[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.240):/oracle/EPP/sapdata4[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(2.240):/oracle/EPP/sapdata4[fs_mount:123] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:cl_activate_fs(2.240):/oracle/EPP/sapdata4[fs_mount:128] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:cl_activate_fs(2.240):/oracle/EPP/sapdata4[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.240):/oracle/EPP/sapdata4[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.240):/oracle/EPP/sapdata4[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.260):/oracle/EPP/sapdata4[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.260):/oracle/EPP/sapdata4[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.260):/oracle/EPP/sapdata4[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_activate_fs(2.260):/oracle/EPP/sapdata4[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.261):/oracle/EPP/sapdata4[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.285):/oracle/EPP/sapdata4[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.287):/oracle/EPP/sapdata4[amlog_trace:319] 
DATE=2023-01-28T18:03:46.627548 +epprd_rg:cl_activate_fs(2.287):/oracle/EPP/sapdata4[amlog_trace:320] echo '|2023-01-28T18:03:46.627548|INFO: Activating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_activate_fs(2.287):/oracle/EPP/sapdata4[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.287):/oracle/EPP/sapdata4[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.290):/oracle/EPP/sapdata4[fs_mount:162] : Try to mount filesystem /oracle/EPP/sapdata4 at Jan 28 18:03:46.000 +epprd_rg:cl_activate_fs(2.290):/oracle/EPP/sapdata4[fs_mount:163] mount /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(2.301):/oracle/EPP/sapdata4[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.301):/oracle/EPP/sapdata4[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.302):/oracle/EPP/sapdata4[fs_mount:220] : if we are running on an AIX level that supports it +epprd_rg:cl_activate_fs(2.302):/oracle/EPP/sapdata4[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/sapdata4' +epprd_rg:cl_activate_fs(2.302):/oracle/EPP/sapdata4[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.302):/oracle/EPP/sapdata4[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.326):/oracle/EPP/sapdata4[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.329):/oracle/EPP/sapdata4[amlog_trace:319] DATE=2023-01-28T18:03:46.669276 +epprd_rg:cl_activate_fs(2.329):/oracle/EPP/sapdata4[amlog_trace:320] echo '|2023-01-28T18:03:46.669276|INFO: Activating Filesystems completed|/oracle/EPP/sapdata4' +epprd_rg:cl_activate_fs(2.329):/oracle/EPP/sapdata4[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.329):/oracle/EPP/sapdata4[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.329):/oracle/EPP/sapdata4[fs_mount:226] : Each of the V, R, M and F fields is padded to fixed length, +epprd_rg:cl_activate_fs(2.329):/oracle/EPP/sapdata4[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.329):/oracle/EPP/sapdata4[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.329):/oracle/EPP/sapdata4[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.329):/oracle/EPP/sapdata4[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.329):/oracle/EPP/sapdata4[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.329):/oracle/EPP/sapdata4[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.329):/oracle/EPP/sapdata4[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.329):/oracle/EPP/sapdata4[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.330):/oracle/EPP/sapdata4[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.331):/oracle/EPP/sapdata4[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.333):/oracle/EPP/sapdata4[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.333):/oracle/EPP/sapdata4[fs_mount:236] IFS=.
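
[Editor's note] After the version gate on the next trace line, the mountguard check seen for sapdata3 repeats: the LVCB's fs attribute string is tested for mountguard=yes, and only when the flag is absent would it be switched on, because changing it bumps the VG timestamp (hence the "run once" comment). A minimal sketch of the branch the trace never takes here, assuming JFS2's chfs mountguard attribute:

    # LVCB_info holds the getlvcb -T -A output captured at fs_mount:101.
    if [[ $LVCB_info != *mountguard=yes* ]]
    then
        # Not yet guarded: set it once. This rewrites the LVCB, which is
        # why the script avoids repeating it when the flag is already set.
        chfs -a mountguard=yes "$FS"
    fi
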
+epprd_rg:cl_activate_fs(2.333):/oracle/EPP/sapdata4[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.333):/oracle/EPP/sapdata4[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.333):/oracle/EPP/sapdata4[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.333):/oracle/EPP/sapdata4[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.333):/oracle/EPP/sapdata4[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.333):/oracle/EPP/sapdata4[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(2.214):/oracle/EPP/sapdata4[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.10 \n\t lvname = sapdata4lv \n\t label = /oracle/EPP/sapdata4 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.333):/oracle/EPP/sapdata4[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.333):/sapmnt[activate_fs_process_group:527] PS4_LOOP=/sapmnt +epprd_rg:cl_activate_fs(2.333):/sapmnt[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(2.333):/sapmnt[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(2.333):/sapmnt[activate_fs_process_group:540] fs_mount /sapmnt fsck epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:69] FS=/sapmnt +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:81] : Here check to see if the information in /etc/filesystems for /sapmnt +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(2.333):/sapmnt[fs_mount:84] : point in /etc/filesystems. 
+epprd_rg:cl_activate_fs(2.334):/sapmnt[fs_mount:86] lsfs -c /sapmnt +epprd_rg:cl_activate_fs(2.334):/sapmnt[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(2.340):/sapmnt[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(2.335):/sapmnt[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/sapmnt:/dev/sapmntlv:jfs2:::20971520:rw:no:no' +epprd_rg:cl_activate_fs(2.340):/sapmnt[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(2.340):/sapmnt[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.341):/sapmnt[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(2.335):/sapmnt[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/sapmnt:/dev/sapmntlv:jfs2:::20971520:rw:no:no' +epprd_rg:cl_activate_fs(2.341):/sapmnt[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(2.342):/sapmnt[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(2.342):/sapmnt[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(2.343):/sapmnt[fs_mount:100] LV_name=sapmntlv +epprd_rg:cl_activate_fs(2.344):/sapmnt[fs_mount:101] getlvcb -T -A sapmntlv +epprd_rg:cl_activate_fs(2.344):/sapmnt[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.362):/sapmnt[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(2.345):/sapmnt[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.3 \n\t lvname = sapmntlv \n\t label = /sapmnt \n\t machine id = 44AF14B00 \n\t number lps = 20 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:41 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.362):/sapmnt[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.362):/sapmnt[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.363):/sapmnt[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(2.345):/sapmnt[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.3 \n\t lvname = sapmntlv \n\t label = /sapmnt \n\t machine id = 44AF14B00 \n\t number lps = 20 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:41 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.364):/sapmnt[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.365):/sapmnt[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(2.366):/sapmnt[fs_mount:115] clodmget -q 'name = sapmntlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(2.370):/sapmnt[fs_mount:115] CuAt_label=/sapmnt +epprd_rg:cl_activate_fs(2.370):/sapmnt[fs_mount:118] : At this point, if things are working correctly, /sapmnt from /etc/filesystems +epprd_rg:cl_activate_fs(2.370):/sapmnt[fs_mount:119] : should match /sapmnt from CuAt ODM and /sapmnt from the LVCB +epprd_rg:cl_activate_fs(2.370):/sapmnt[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.370):/sapmnt[fs_mount:121] : were done in clvaryonvg. 
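
[Editor's note] The clwparroot run that follows (and that already ran for each previous filesystem) boils down to one ODM probe: loadWparName asks HACMPresource whether the resource group defines a WPAR_NAME, and an empty answer means the mount happens in the global AIX environment rather than inside a workload partition. A minimal ksh sketch of that decision, using the checks from the trace:

    # Print the WPAR name for a resource group, or nothing when no WPAR
    # is configured (the case throughout this trace).
    lslpp -l bos.wpars > /dev/null 2>&1 || exit 0   # no WPAR support installed
    wparName=$(clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource)
    if [[ -z $wparName ]]
    then
        exit 0        # caller sees an empty WPAR_ROOT: use the global environment
    fi
    print -- "$wparName"
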
+epprd_rg:cl_activate_fs(2.370):/sapmnt[fs_mount:123] [[ /sapmnt != /sapmnt ]] +epprd_rg:cl_activate_fs(2.370):/sapmnt[fs_mount:128] [[ /sapmnt != /sapmnt ]] +epprd_rg:cl_activate_fs(2.370):/sapmnt[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.370):/sapmnt[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.370):/sapmnt[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.390):/sapmnt[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.390):/sapmnt[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.390):/sapmnt[fs_mount:160] amlog_trace '' 'Activating Filesystem|/sapmnt' +epprd_rg:cl_activate_fs(2.390):/sapmnt[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.391):/sapmnt[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.414):/sapmnt[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.417):/sapmnt[amlog_trace:319] DATE=2023-01-28T18:03:46.757442 +epprd_rg:cl_activate_fs(2.417):/sapmnt[amlog_trace:320] echo '|2023-01-28T18:03:46.757442|INFO: Activating Filesystem|/sapmnt' +epprd_rg:cl_activate_fs(2.417):/sapmnt[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.417):/sapmnt[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.420):/sapmnt[fs_mount:162] : Try to mount filesystem /sapmnt at Jan 28 18:03:46.000 +epprd_rg:cl_activate_fs(2.420):/sapmnt[fs_mount:163] mount /sapmnt +epprd_rg:cl_activate_fs(2.431):/sapmnt[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.431):/sapmnt[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.431):/sapmnt[fs_mount:220] : if we are 
running on an AIX level that supports it +epprd_rg:cl_activate_fs(2.431):/sapmnt[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/sapmnt' +epprd_rg:cl_activate_fs(2.431):/sapmnt[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.432):/sapmnt[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.456):/sapmnt[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.459):/sapmnt[amlog_trace:319] DATE=2023-01-28T18:03:46.799392 +epprd_rg:cl_activate_fs(2.459):/sapmnt[amlog_trace:320] echo '|2023-01-28T18:03:46.799392|INFO: Activating Filesystems completed|/sapmnt' +epprd_rg:cl_activate_fs(2.459):/sapmnt[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.459):/sapmnt[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.459):/sapmnt[fs_mount:226] : Each of the V, R, M and F fields is padded to fixed length, +epprd_rg:cl_activate_fs(2.459):/sapmnt[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.459):/sapmnt[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.459):/sapmnt[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.459):/sapmnt[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.459):/sapmnt[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.459):/sapmnt[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.459):/sapmnt[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.459):/sapmnt[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.460):/sapmnt[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.461):/sapmnt[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.463):/sapmnt[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.463):/sapmnt[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(2.463):/sapmnt[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.463):/sapmnt[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.463):/sapmnt[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.463):/sapmnt[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.463):/sapmnt[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.463):/sapmnt[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(2.345):/sapmnt[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.3 \n\t lvname = sapmntlv \n\t label = /sapmnt \n\t machine id = 44AF14B00 \n\t number lps = 20 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:41 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.463):/sapmnt[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.463):/usr/sap[activate_fs_process_group:527] PS4_LOOP=/usr/sap +epprd_rg:cl_activate_fs(2.463):/usr/sap[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(2.463):/usr/sap[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(2.463):/usr/sap[activate_fs_process_group:540] fs_mount /usr/sap fsck epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(2.463):/usr/sap[fs_mount:69] FS=/usr/sap +epprd_rg:cl_activate_fs(2.463):/usr/sap[fs_mount:69] typeset FS
+epprd_rg:cl_activate_fs(2.463):/usr/sap[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(2.463):/usr/sap[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(2.463):/usr/sap[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27918684 +epprd_rg:cl_activate_fs(2.463):/usr/sap[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(2.463):/usr/sap[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.463):/usr/sap[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(2.463):/usr/sap[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(2.463):/usr/sap[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(2.463):/usr/sap[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(2.463):/usr/sap[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(2.463):/usr/sap[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:81] : Here check to see if the information in /etc/filesystems for /usr/sap +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:86] lsfs -c /usr/sap +epprd_rg:cl_activate_fs(2.465):/usr/sap[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(2.470):/usr/sap[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(2.465):/usr/sap[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/usr/sap:/dev/saplv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.470):/usr/sap[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(2.470):/usr/sap[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.471):/usr/sap[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(2.465):/usr/sap[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/usr/sap:/dev/saplv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.472):/usr/sap[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(2.472):/usr/sap[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(2.472):/usr/sap[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(2.474):/usr/sap[fs_mount:100] LV_name=saplv +epprd_rg:cl_activate_fs(2.474):/usr/sap[fs_mount:101] getlvcb -T -A saplv +epprd_rg:cl_activate_fs(2.475):/usr/sap[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.493):/usr/sap[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(2.475):/usr/sap[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.2 \n\t lvname = saplv \n\t label = /usr/sap \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:37 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.493):/usr/sap[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.493):/usr/sap[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.494):/usr/sap[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(2.475):/usr/sap[fs_mount:101] 
LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.2 \n\t lvname = saplv \n\t label = /usr/sap \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:37 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.495):/usr/sap[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.496):/usr/sap[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(2.497):/usr/sap[fs_mount:115] clodmget -q 'name = saplv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(2.500):/usr/sap[fs_mount:115] CuAt_label=/usr/sap +epprd_rg:cl_activate_fs(2.500):/usr/sap[fs_mount:118] : At this point, if things are working correctly, /usr/sap from /etc/filesystems +epprd_rg:cl_activate_fs(2.500):/usr/sap[fs_mount:119] : should match /usr/sap from CuAt ODM and /usr/sap from the LVCB +epprd_rg:cl_activate_fs(2.500):/usr/sap[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.500):/usr/sap[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(2.500):/usr/sap[fs_mount:123] [[ /usr/sap != /usr/sap ]] +epprd_rg:cl_activate_fs(2.501):/usr/sap[fs_mount:128] [[ /usr/sap != /usr/sap ]] +epprd_rg:cl_activate_fs(2.501):/usr/sap[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.501):/usr/sap[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.501):/usr/sap[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:160] amlog_trace '' 'Activating Filesystem|/usr/sap' +epprd_rg:cl_activate_fs(2.521):/usr/sap[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.521):/usr/sap[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.545):/usr/sap[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.548):/usr/sap[amlog_trace:319] DATE=2023-01-28T18:03:46.888384 +epprd_rg:cl_activate_fs(2.548):/usr/sap[amlog_trace:320] echo '|2023-01-28T18:03:46.888384|INFO: Activating Filesystem|/usr/sap' +epprd_rg:cl_activate_fs(2.548):/usr/sap[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.548):/usr/sap[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.551):/usr/sap[fs_mount:162] : Try to mount filesystem /usr/sap at Jan 28 18:03:46.000 +epprd_rg:cl_activate_fs(2.551):/usr/sap[fs_mount:163] mount /usr/sap +epprd_rg:cl_activate_fs(2.563):/usr/sap[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.563):/usr/sap[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.563):/usr/sap[fs_mount:220] : if we are running on an AIX level that supports it +epprd_rg:cl_activate_fs(2.563):/usr/sap[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/usr/sap' +epprd_rg:cl_activate_fs(2.563):/usr/sap[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.564):/usr/sap[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.588):/usr/sap[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.590):/usr/sap[amlog_trace:319] DATE=2023-01-28T18:03:46.930633
+epprd_rg:cl_activate_fs(2.590):/usr/sap[amlog_trace:320] echo '|2023-01-28T18:03:46.930633|INFO: Activating Filesystems completed|/usr/sap' +epprd_rg:cl_activate_fs(2.590):/usr/sap[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.590):/usr/sap[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.590):/usr/sap[fs_mount:226] : Each of the V, R, M and F fields is padded to fixed length, +epprd_rg:cl_activate_fs(2.590):/usr/sap[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.590):/usr/sap[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.590):/usr/sap[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.590):/usr/sap[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.591):/usr/sap[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.591):/usr/sap[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.591):/usr/sap[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.591):/usr/sap[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.592):/usr/sap[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.593):/usr/sap[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.594):/usr/sap[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.594):/usr/sap[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(2.594):/usr/sap[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.594):/usr/sap[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.594):/usr/sap[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.594):/usr/sap[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.594):/usr/sap[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.594):/usr/sap[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(2.475):/usr/sap[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.2 \n\t lvname = saplv \n\t label = /usr/sap \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:37 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.595):/usr/sap[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.595):/usr/sap[activate_fs_process_group:543] unset PS4_LOOP PS4_TIMER +epprd_rg:cl_activate_fs[activate_fs_process_group:546] : Allow any background mount operations to finish +epprd_rg:cl_activate_fs[activate_fs_process_group:548] wait +epprd_rg:cl_activate_fs[activate_fs_process_group:550] : Read cluster level Preferred read option +epprd_rg:cl_activate_fs[activate_fs_process_group:552] clodmget -n -f lvm_preferred_read HACMPcluster +epprd_rg:cl_activate_fs[activate_fs_process_group:552] cluster_pref_read=roundrobin +epprd_rg:cl_activate_fs[activate_fs_process_group:555] : Looping all file systems to update preferred read option of each lv.
+epprd_rg:cl_activate_fs[activate_fs_process_group:556] : By referring VG level preferred_read option or cluster level Preferred read option +epprd_rg:cl_activate_fs[activate_fs_process_group:560] lsfs -c /board_org +epprd_rg:cl_activate_fs[activate_fs_process_group:560] 2>& 1 +epprd_rg:cl_activate_fs[activate_fs_process_group:560] FS_info=$'+epprd_rg:cl_activate_fs[activate_fs_process_group:560] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs[activate_fs_process_group:561] RC=0 +epprd_rg:cl_activate_fs[activate_fs_process_group:562] (( 0 != 0 )) +epprd_rg:cl_activate_fs[activate_fs_process_group:574] print -- $'+epprd_rg:cl_activate_fs[activate_fs_process_group:560] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs[activate_fs_process_group:574] tail -1 +epprd_rg:cl_activate_fs[activate_fs_process_group:574] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs[activate_fs_process_group:574] IFS=: +epprd_rg:cl_activate_fs[activate_fs_process_group:575] LV_name=boardlv +epprd_rg:cl_activate_fs[activate_fs_process_group:577] grep -w 'VOLUME GROUP' +epprd_rg:cl_activate_fs[activate_fs_process_group:577] lslv -L boardlv +epprd_rg:cl_activate_fs[activate_fs_process_group:577] LC_ALL=C +epprd_rg:cl_activate_fs[activate_fs_process_group:577] volume_group='LOGICAL VOLUME: boardlv VOLUME GROUP: datavg' +epprd_rg:cl_activate_fs[activate_fs_process_group:578] volume_group=datavg +epprd_rg:cl_activate_fs[activate_fs_process_group:579] volume_group=datavg +epprd_rg:cl_activate_fs[activate_fs_process_group:581] clodmget -n -f group -q name='VOLUME_GROUP and value=datavg' HACMPresource +epprd_rg:cl_activate_fs[activate_fs_process_group:581] RGName=epprd_rg +epprd_rg:cl_activate_fs[activate_fs_process_group:584] : Get the Preferred storage read option for this VG and perform chlv command +epprd_rg:cl_activate_fs[activate_fs_process_group:586] clodmget -n -f value -q name='LVM_PREFERRED_READ and volume_group=datavg' HACMPvolumegroup +epprd_rg:cl_activate_fs[activate_fs_process_group:586] 2> /dev/null +epprd_rg:cl_activate_fs[activate_fs_process_group:586] PreferredReadOption='' +epprd_rg:cl_activate_fs[activate_fs_process_group:587] [[ -z '' ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:589] PreferredReadOption=roundrobin +epprd_rg:cl_activate_fs[activate_fs_process_group:590] [[ -z roundrobin ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:590] [[ roundrobin == roundrobin ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:593] : Both VG level and Cluster level LVM Preferred Read option chosen as roundrobin. 
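
[Editor's note] With all mounts done, activate_fs_process_group reconciles two ODM settings per volume group: a VG-level LVM_PREFERRED_READ from HACMPvolumegroup and the cluster-wide lvm_preferred_read from HACMPcluster, the VG value winning when present. For roundrobin the next trace line issues chlv -R 0, which clears any preferred mirror copy. A minimal ksh sketch of that precedence logic, using the queries from the trace:

    typeset LV_name=$1 volume_group=$2 PreferredReadOption cluster_pref_read
    cluster_pref_read=$(clodmget -n -f lvm_preferred_read HACMPcluster)
    PreferredReadOption=$(clodmget -n -f value \
        -q "name=LVM_PREFERRED_READ and volume_group=$volume_group" HACMPvolumegroup 2>/dev/null)
    # VG-level setting wins; fall back to the cluster-wide default.
    [[ -z $PreferredReadOption ]] && PreferredReadOption=$cluster_pref_read
    if [[ $PreferredReadOption == roundrobin ]]
    then
        chlv -R 0 "$LV_name"    # 0 = no preferred copy: reads go round-robin
    fi
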
+epprd_rg:cl_activate_fs[activate_fs_process_group:595] chlv -R 0 boardlv +epprd_rg:cl_activate_fs[activate_fs_process_group:596] (( 0 != 0 )) +epprd_rg:cl_activate_fs[activate_fs_process_group:600] break +epprd_rg:cl_activate_fs[activate_fs_process_group:670] : Update the resource manager with the state of the operation +epprd_rg:cl_activate_fs[activate_fs_process_group:672] ALLNOERROR=All_non_error_filesystems +epprd_rg:cl_activate_fs[activate_fs_process_group:673] cl_RMupdate resource_up All_non_error_filesystems cl_activate_fs 2023-01-28T18:03:47.236013 2023-01-28T18:03:47.240465 +epprd_rg:cl_activate_fs[activate_fs_process_group:676] : And harvest any status from the background mount operations +epprd_rg:cl_activate_fs[activate_fs_process_group:678] [[ -f /tmp/epprd_rg_activate_fs.tmp27918684 ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:688] return 0 +epprd_rg:cl_activate_fs[activate_fs_process_resources:767] RC=0 +epprd_rg:cl_activate_fs[activate_fs_process_resources:768] (( 0 != 0 && 0 == 0 )) +epprd_rg:cl_activate_fs[activate_fs_process_resources:772] RG_FILE_SYSTEMS='' +epprd_rg:cl_activate_fs[activate_fs_process_resources:776] return 0 +epprd_rg:cl_activate_fs[851] STATUS=0 +epprd_rg:cl_activate_fs[873] return 0 +epprd_rg:process_resources(8.237)[process_file_systems:2648] RC=0 +epprd_rg:process_resources(8.237)[process_file_systems:2649] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources(8.237)[process_file_systems:2661] (( 0 != 0 )) +epprd_rg:process_resources(8.237)[process_file_systems:2687] return 0 +epprd_rg:process_resources(8.237)[3483] RC=0 +epprd_rg:process_resources(8.237)[3485] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources(8.238)[3324] true +epprd_rg:process_resources(8.238)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(8.238)[3328] set -a +epprd_rg:process_resources(8.238)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:03:47.254058 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(8.257)[3329] eval JOB_TYPE=SYNC_VGS ACTION=ACQUIRE VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' +epprd_rg:process_resources(8.257)[1] JOB_TYPE=SYNC_VGS +epprd_rg:process_resources(8.257)[1] ACTION=ACQUIRE +epprd_rg:process_resources(8.257)[1] VOLUME_GROUPS=datavg +epprd_rg:process_resources(8.257)[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources(8.257)[3330] RC=0 +epprd_rg:process_resources(8.257)[3331] set +a +epprd_rg:process_resources(8.257)[3333] (( 0 != 0 )) +epprd_rg:process_resources(8.257)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(8.257)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(8.257)[3343] export GROUPNAME +epprd_rg:process_resources(8.257)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(8.257)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(8.257)[3360] [[ SYNC_VGS == RELEASE ]] +epprd_rg:process_resources(8.257)[3360] [[ SYNC_VGS == ONLINE ]] +epprd_rg:process_resources(8.257)[3474] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources(8.257)[3476] sync_volume_groups +epprd_rg:process_resources(8.257)[sync_volume_groups:2699] PS4_FUNC=sync_volume_groups +epprd_rg:process_resources(8.257)[sync_volume_groups:2699] typeset PS4_FUNC +epprd_rg:process_resources(8.257)[sync_volume_groups:2700] [[ high == high ]] 
+epprd_rg:process_resources(8.257)[sync_volume_groups:2700] set -x +epprd_rg:process_resources(8.257)[sync_volume_groups:2701] STAT=0 +epprd_rg:process_resources(8.257)[sync_volume_groups:2704] export GROUPNAME +epprd_rg:process_resources(8.258)[sync_volume_groups:2706] get_list_head datavg +epprd_rg:process_resources(8.259)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(8.259)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(8.259)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(8.259)[get_list_head:60] set -x +epprd_rg:process_resources(8.260)[get_list_head:61] echo datavg +epprd_rg:process_resources(8.260)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(8.260)[get_list_head:61] IFS=: +epprd_rg:process_resources(8.261)[get_list_head:62] echo datavg +epprd_rg:process_resources(8.261)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(8.258)[sync_volume_groups:2706] read LIST_OF_VOLUME_GROUPS_FOR_RG +epprd_rg:process_resources(8.265)[sync_volume_groups:2707] get_list_tail datavg +epprd_rg:process_resources(8.265)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(8.265)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(8.265)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(8.265)[get_list_tail:68] set -x +epprd_rg:process_resources(8.266)[get_list_tail:69] echo datavg +epprd_rg:process_resources(8.267)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(8.267)[get_list_tail:69] IFS=: +epprd_rg:process_resources(8.267)[get_list_tail:70] echo +epprd_rg:process_resources(8.266)[sync_volume_groups:2707] read VOLUME_GROUPS +epprd_rg:process_resources(8.268)[sync_volume_groups:2710] : Sync the active volume groups +epprd_rg:process_resources(8.269)[sync_volume_groups:2712] lsvg -L -o +epprd_rg:process_resources(8.269)[sync_volume_groups:2712] 2> /tmp/lsvg.err +epprd_rg:process_resources(8.269)[sync_volume_groups:2712] sort +epprd_rg:process_resources(8.271)[sync_volume_groups:2712] 1> /tmp/lsvg.out.28836342 +epprd_rg:process_resources(8.277)[sync_volume_groups:2713] echo datavg +epprd_rg:process_resources(8.278)[sync_volume_groups:2713] tr ' ' '\n' +epprd_rg:process_resources(8.280)[sync_volume_groups:2714] comm -12 /tmp/lsvg.out.28836342 - +epprd_rg:process_resources(8.280)[sync_volume_groups:2714] sort +epprd_rg:process_resources(8.285)[sync_volume_groups:2718] [[ -s /tmp/lsvg.err ]] +epprd_rg:process_resources(8.285)[sync_volume_groups:2723] rm -f /tmp/lsvg.out.28836342 /tmp/lsvg.err +epprd_rg:process_resources(8.285)[sync_volume_groups:2716] cl_sync_vgs datavg +epprd_rg:process_resources(8.289)[sync_volume_groups:2732] unset AM_SYNC_CALLED_BY +epprd_rg:process_resources(8.289)[sync_volume_groups:2734] return 0 +epprd_rg:process_resources(8.290)[3324] true +epprd_rg:process_resources(8.290)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(8.290)[3328] set -a +epprd_rg:process_resources(8.290)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa +epprd_rg:cl_sync_vgs[303] version=1.24.1.4 +epprd_rg:cl_sync_vgs[306] (( 1 == 0 )) +epprd_rg:cl_sync_vgs[312] : syncing 4 stale PPs at a time seems to be a win most of the time, but +epprd_rg:cl_sync_vgs[313] : we honor the NUM_PARALLEL_LPS value from /etc/environment, as does +epprd_rg:cl_sync_vgs[314] : 
syncvg. +epprd_rg:cl_sync_vgs[316] syncflag='' +epprd_rg:cl_sync_vgs[316] export syncflag +epprd_rg:cl_sync_vgs[317] PS4_LOOP='' +epprd_rg:cl_sync_vgs[317] export PS4_LOOP +epprd_rg:cl_sync_vgs[318] typeset -i npl +epprd_rg:cl_sync_vgs[319] grep -q ^NUM_PARALLEL_LPS= /etc/environment 2023-01-28T18:03:47.306275 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:cl_sync_vgs[321] syncflag=-P4 +epprd_rg:cl_sync_vgs[328] echo 'NOTE: While the sync is going on, volume group can be used' NOTE: While the sync is going on, volume group can be used +epprd_rg:cl_sync_vgs[331] : For GLVM volume groups, read PARALLEL LPS count from HACMPresource if it is set from GUI, +epprd_rg:cl_sync_vgs[332] : else, read from environment variables, if it is not set use 32 as default value. +epprd_rg:cl_sync_vgs[334] clodmget -q name='GMVG_REP_RESOURCE and value=datavg' -f group HACMPresource +epprd_rg:process_resources(8.303)[3329] eval JOB_TYPE=EXPORT_FILESYSTEMS ACTION=ACQUIRE EXPORT_FILE_SYSTEMS='"/board_org,/sapmnt/EPP"' EXPORT_FILE_SYSTEMS_V4='""' RESOURCE_GROUPS='"epprd_rg' '"' STABLE_STORAGE_PATH='""' IP_LABELS='"epprd:epprda:epprds"' DAEMONS='"NFS' 'RPCLOCKD"' +epprd_rg:process_resources(8.303)[1] JOB_TYPE=EXPORT_FILESYSTEMS +epprd_rg:process_resources(8.303)[1] ACTION=ACQUIRE +epprd_rg:process_resources(8.303)[1] EXPORT_FILE_SYSTEMS=/board_org,/sapmnt/EPP +epprd_rg:process_resources(8.303)[1] EXPORT_FILE_SYSTEMS_V4='' +epprd_rg:process_resources(8.303)[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources(8.303)[1] STABLE_STORAGE_PATH='' +epprd_rg:process_resources(8.303)[1] IP_LABELS=epprd:epprda:epprds +epprd_rg:process_resources(8.303)[1] DAEMONS='NFS RPCLOCKD' +epprd_rg:process_resources(8.303)[3330] RC=0 +epprd_rg:process_resources(8.303)[3331] set +a +epprd_rg:process_resources(8.303)[3333] (( 0 != 0 )) +epprd_rg:process_resources(8.303)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(8.303)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(8.303)[3343] export GROUPNAME +epprd_rg:process_resources(8.303)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(8.303)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(8.303)[3360] [[ EXPORT_FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources(8.303)[3360] [[ EXPORT_FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources(8.303)[3595] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources(8.303)[3597] export_filesystems +epprd_rg:process_resources(8.303)[export_filesystems:1621] PS4_FUNC=export_filesystems +epprd_rg:process_resources(8.303)[export_filesystems:1621] typeset PS4_FUNC +epprd_rg:process_resources(8.303)[export_filesystems:1622] [[ high == high ]] +epprd_rg:process_resources(8.303)[export_filesystems:1622] set -x +epprd_rg:process_resources(8.303)[export_filesystems:1623] STAT=0 +epprd_rg:process_resources(8.303)[export_filesystems:1624] NFSSTOPPED=0 +epprd_rg:process_resources(8.303)[export_filesystems:1629] [[ NFS == RPCLOCKD ]] +epprd_rg:process_resources(8.303)[export_filesystems:1629] [[ RPCLOCKD == RPCLOCKD ]] +epprd_rg:process_resources(8.303)[export_filesystems:1631] stopsrc -s rpc.lockd +epprd_rg:cl_sync_vgs[334] 2> /dev/null +epprd_rg:cl_sync_vgs[334] glvm_rg='' +epprd_rg:cl_sync_vgs[335] [[ -n '' ]] +epprd_rg:cl_sync_vgs[353] check_sync datavg +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:76] typeset vg_name +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:77] typeset vgid +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:78] typeset disklist +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:79] typeset lv_name 
+epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:80] typeset -li stale_count +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:81] typeset -li mode +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:82] RC=0 +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:82] typeset -li RC +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:83] typeset site_node_list +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:84] typeset site_choice +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:86] vg_name=datavg +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:87] disklist='' +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:89] getlvodm -v datavg 0513-044 The rpc.lockd Subsystem was requested to stop. +epprd_rg:cl_sync_vgs(0.021):datavg[check_sync:89] vgid=00c44af100004b00000001851e9dc053 +epprd_rg:cl_sync_vgs(0.021):datavg[check_sync:92] : find disks in the VG that LVM thinks are inaccessible +epprd_rg:cl_sync_vgs(0.021):datavg[check_sync:94] lsvg -L -p datavg +epprd_rg:cl_sync_vgs(0.022):datavg[check_sync:94] LC_ALL=C +epprd_rg:process_resources(8.313)[export_filesystems:1633] touch /tmp/.RPCLOCKDSTOPPED +epprd_rg:process_resources(8.317)[export_filesystems:1638] : For NFSv4, cl_export_fs will use STABLE_STORAGE_PATH, which is set by +epprd_rg:process_resources(8.317)[export_filesystems:1639] : clRGPA and can have colon-separated values for multiple RGs. +epprd_rg:process_resources(8.317)[export_filesystems:1640] : We will save off clRGPA values in stable_storage_path and then extract +epprd_rg:process_resources(8.317)[export_filesystems:1641] : each RG into STABLE_STORAGE_PATH for cl_unexport_fs. +epprd_rg:process_resources(8.317)[export_filesystems:1643] stable_storage_path='' +epprd_rg:process_resources(8.317)[export_filesystems:1643] typeset stable_storage_path +epprd_rg:process_resources(8.317)[export_filesystems:1645] export NFSSTOPPED +epprd_rg:process_resources(8.317)[export_filesystems:1650] export GROUPNAME +epprd_rg:process_resources(8.318)[export_filesystems:1652] get_list_head /board_org,/sapmnt/EPP +epprd_rg:process_resources(8.319)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(8.319)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(8.319)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(8.319)[get_list_head:60] set -x +epprd_rg:process_resources(8.320)[get_list_head:61] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources(8.321)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(8.321)[get_list_head:61] IFS=: +epprd_rg:process_resources(8.322)[get_list_head:62] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources(8.322)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(8.319)[export_filesystems:1652] read LIST_OF_EXPORT_FILE_SYSTEMS_FOR_RG +epprd_rg:process_resources(8.326)[export_filesystems:1653] get_list_tail /board_org,/sapmnt/EPP +epprd_rg:process_resources(8.326)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(8.326)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(8.326)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(8.326)[get_list_tail:68] set -x +epprd_rg:process_resources(8.328)[get_list_tail:69] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources(8.328)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(8.328)[get_list_tail:69] IFS=: +epprd_rg:process_resources(8.329)[get_list_tail:70] echo +epprd_rg:process_resources(8.327)[export_filesystems:1653] read EXPORT_FILE_SYSTEMS +epprd_rg:process_resources(8.331)[export_filesystems:1654] get_list_head
+epprd_rg:process_resources(8.331)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(8.331)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(8.331)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(8.332)[get_list_head:60] set -x +epprd_rg:process_resources(8.333)[get_list_head:61] echo +epprd_rg:process_resources(8.334)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(8.334)[get_list_head:61] IFS=: +epprd_rg:process_resources(8.335)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(8.335)[get_list_head:62] echo +epprd_rg:process_resources(8.331)[export_filesystems:1654] read LIST_OF_EXPORT_FILE_SYSTEMS_V4_FOR_RG +epprd_rg:process_resources(8.338)[export_filesystems:1655] get_list_tail +epprd_rg:process_resources(8.338)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(8.338)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(8.339)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(8.339)[get_list_tail:68] set -x +epprd_rg:process_resources(8.340)[get_list_tail:69] echo +epprd_rg:process_resources(8.340)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(8.340)[get_list_tail:69] IFS=: +epprd_rg:process_resources(8.341)[get_list_tail:70] echo +epprd_rg:process_resources(8.340)[export_filesystems:1655] read EXPORT_FILE_SYSTEMS_V4 +epprd_rg:process_resources(8.343)[export_filesystems:1656] get_list_head +epprd_rg:process_resources(8.343)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(8.343)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(8.343)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(8.343)[get_list_head:60] set -x +epprd_rg:process_resources(8.344)[get_list_head:61] echo +epprd_rg:process_resources(8.344)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(8.344)[get_list_head:61] IFS=: +epprd_rg:process_resources(8.345)[get_list_head:62] echo +epprd_rg:process_resources(8.346)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(8.343)[export_filesystems:1656] read STABLE_STORAGE_PATH +epprd_rg:process_resources(8.349)[export_filesystems:1657] get_list_tail +epprd_rg:process_resources(8.350)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(8.350)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(8.350)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(8.350)[get_list_tail:68] set -x +epprd_rg:process_resources(8.351)[get_list_tail:69] echo +epprd_rg:process_resources(8.351)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(8.351)[get_list_tail:69] IFS=: +epprd_rg:process_resources(8.351)[get_list_tail:70] echo +epprd_rg:process_resources(8.350)[export_filesystems:1657] read stable_storage_path +epprd_rg:process_resources(8.352)[export_filesystems:1659] cl_export_fs epprd:epprda:epprds '/board_org /sapmnt/EPP' '' +epprd_rg:cl_sync_vgs(0.067):datavg[check_sync:94] disklist=$'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.068):datavg[check_sync:95] print -- $'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 
40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.070):datavg[check_sync:95] grep -w missing +epprd_rg:cl_sync_vgs(0.072):datavg[check_sync:95] cut -f1 '-d ' +epprd_rg:cl_export_fs[102] version=%I% +epprd_rg:cl_export_fs[105] . /usr/es/sbin/cluster/events/utils/cl_nfs_utils +epprd_rg:cl_export_fs[98] PROGNAME=cl_export_fs +epprd_rg:cl_export_fs[99] [[ high == high ]] +epprd_rg:cl_export_fs[101] set -x +epprd_rg:cl_export_fs[102] version=%I +epprd_rg:cl_export_fs[105] cl_exports_data='' +epprd_rg:cl_export_fs[105] typeset cl_exports_data +epprd_rg:cl_export_fs[106] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[107] HOST=epprd:epprda:epprds +epprd_rg:cl_export_fs[108] EXPORT_V3='/board_org /sapmnt/EPP' +epprd_rg:cl_export_fs[109] EXPORT_V4='' +epprd_rg:cl_export_fs[111] STATUS=0 +epprd_rg:cl_export_fs[113] LIMIT=60 +epprd_rg:cl_export_fs[113] WAIT=1 +epprd_rg:cl_export_fs[113] TRY=0 +epprd_rg:cl_export_fs[113] typeset -li LIMIT WAIT TRY +epprd_rg:cl_export_fs[115] PROC_RES=false +epprd_rg:cl_export_fs[118] : If JOB_TYPE is set, and it does not equal to GROUP, then +epprd_rg:cl_export_fs[119] : we are processing for process_resources +epprd_rg:cl_export_fs[121] [[ EXPORT_FILESYSTEMS != 0 ]] +epprd_rg:cl_export_fs[121] [[ EXPORT_FILESYSTEMS != GROUP ]] +epprd_rg:cl_export_fs[122] PROC_RES=true +epprd_rg:cl_export_fs[125] set -u +epprd_rg:cl_export_fs[127] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[129] (( 3 < 2 || 3 > 3 )) +epprd_rg:cl_export_fs[142] DARE_EVENT=reconfig_resource_acquire +epprd_rg:cl_export_fs[145] : Check memory to see if NFSv4 exports have been configured. +epprd_rg:cl_export_fs[147] export_v4='' +epprd_rg:cl_export_fs[148] [[ -z '' ]] +epprd_rg:cl_export_fs[148] [[ rg_move == reconfig_resource_acquire ]] +epprd_rg:cl_export_fs[158] : If we do not have NFSv4 exports configured, then determine +epprd_rg:cl_export_fs[159] : the protocol versions from the HACMP exports file. +epprd_rg:cl_export_fs[161] [[ -z '' ]] +epprd_rg:cl_export_fs[161] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[163] export_v3='' +epprd_rg:cl_export_fs[169] : Get the export file for the filesystem from the exports file. +epprd_rg:cl_export_fs[170] : Only look at the part of the line preceding comments. 
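
The getline_exports calls traced below pull a single filesystem's entry out of /usr/es/sbin/cluster/etc/exports: comment text is ignored, the filesystem name must match at the start of a line, and a trailing backslash continues the entry onto the next line. A minimal standalone sketch of that lookup, reconstructed from the traced steps (the function name and argument handling here are illustrative, not the shipped source):

    getline_exports_sketch()      # illustrative name, not the shipped function
    {
        typeset fs=$1
        typeset expfile=${2:-/usr/es/sbin/cluster/etc/exports}
        typeset line entry=''
        typeset -i flag=0

        [[ -z $fs || ! -r $expfile ]] && return 1
        while read -r line ; do
            line=${line%%#*}                     # keep only the part preceding comments
            [[ -z $line ]] && continue
            if (( flag == 0 )) ; then
                # the filesystem name must match at the start of the line
                print -- "$line" | grep -q "^[[:space:]]*${fs}[[:space:]]" || continue
                flag=1
            fi
            entry="$entry $line"
            # a trailing backslash continues the entry on the next line
            print -- "$line" | grep -qw '\\' || break
        done < $expfile
        (( flag )) && print -- "$entry"
        return 0
    }
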
+epprd_rg:cl_export_fs[173] getline_exports /board_org +epprd_rg:cl_export_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_export_fs[getline_exports:45] line='' +epprd_rg:cl_export_fs[getline_exports:45] typeset line +epprd_rg:cl_export_fs[getline_exports:46] flag=0 +epprd_rg:cl_export_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_export_fs[getline_exports:47] fs=/board_org +epprd_rg:cl_export_fs[getline_exports:49] [[ -z /board_org ]] +epprd_rg:cl_export_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_sync_vgs(0.075):datavg[check_sync:95] missing_disklist='' +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_export_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_sync_vgs(0.076):datavg[check_sync:96] print -- $'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_export_fs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_sync_vgs(0.078):datavg[check_sync:96] grep -w removed +epprd_rg:cl_sync_vgs(0.079):datavg[check_sync:96] cut -f1 '-d ' +epprd_rg:cl_export_fs[getline_exports:69] (( 1 == 0 )) +epprd_rg:cl_export_fs[getline_exports:74] [[ 0 == 1 ]] +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:89] return 0 +epprd_rg:cl_export_fs[174] export_line='' +epprd_rg:cl_export_fs[177] : The line is of the format: filesystem -option1,option2,... +epprd_rg:cl_export_fs[178] : This will give 'option1 option2 ...' +epprd_rg:cl_export_fs[180] echo +epprd_rg:cl_sync_vgs(0.083):datavg[check_sync:96] removed_disklist='' +epprd_rg:cl_sync_vgs(0.083):datavg[check_sync:100] : Proceed if there are some disks that LVM thinks are inaccessible +epprd_rg:cl_sync_vgs(0.083):datavg[check_sync:102] [[ -n '' ]] +epprd_rg:cl_sync_vgs(0.083):datavg[check_sync:102] [[ -n '' ]] +epprd_rg:cl_sync_vgs(0.083):datavg[check_sync:196] : sync if any LVs in the VG have stale partitions +epprd_rg:cl_sync_vgs(0.083):datavg[check_sync:198] (( 0 == 0 )) +epprd_rg:cl_sync_vgs(0.083):datavg[check_sync:201] : A status of 2,3,5 or 7 indicates the presence of dirty and/or stale partitions +epprd_rg:cl_sync_vgs(0.083):datavg[check_sync:213] is_start_logged=0 +epprd_rg:cl_sync_vgs(0.083):datavg[check_sync:218] at_least_one_sync_success=0 +epprd_rg:cl_export_fs[180] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_sync_vgs(0.084):datavg[check_sync:219] lqueryvg -g 00c44af100004b00000001851e9dc053 -L +epprd_rg:cl_export_fs[180] cut -d- -f2- +epprd_rg:cl_export_fs[181] tr , ' ' +epprd_rg:cl_export_fs[180] options='' +epprd_rg:cl_export_fs[184] : Each option can be of the format name=value, or just name. +epprd_rg:cl_export_fs[185] : We only care about the vers option.
+epprd_rg:cl_export_fs[189] : Have we seen the vers 'option?' +epprd_rg:cl_export_fs[191] vers_missing=1 +epprd_rg:cl_export_fs[194] : Loop through all of the export options for this export. +epprd_rg:cl_export_fs[218] : If we did not find the vers option, then NFSv3 is the default. +epprd_rg:cl_export_fs[220] (( vers_missing )) +epprd_rg:cl_export_fs[220] export_v3=' /board_org' +epprd_rg:cl_export_fs[169] : Get the export file for the filesystem from the exports file. +epprd_rg:cl_export_fs[170] : Only look at the part of the line preceding comments. +epprd_rg:cl_export_fs[173] getline_exports /sapmnt/EPP +epprd_rg:cl_export_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_export_fs[getline_exports:45] line='' +epprd_rg:cl_export_fs[getline_exports:45] typeset line +epprd_rg:cl_export_fs[getline_exports:46] flag=0 +epprd_rg:cl_export_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_export_fs[getline_exports:47] fs=/sapmnt/EPP +epprd_rg:cl_export_fs[getline_exports:49] [[ -z /sapmnt/EPP ]] +epprd_rg:cl_export_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_export_fs[getline_exports:68] grep -q '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_export_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_export_fs[getline_exports:71] flag=1 +epprd_rg:cl_export_fs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_export_fs[getline_exports:76] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:76] grep -w '\\' +epprd_rg:cl_export_fs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_export_fs[getline_exports:81] cl_exports_data=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:82] break +epprd_rg:cl_export_fs[getline_exports:89] return 0 +epprd_rg:cl_export_fs[174] export_line=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[177] : The line is of the format: filesystem -option1,option2,... +epprd_rg:cl_export_fs[178] : This will give 'option1 option2 ...' +epprd_rg:cl_export_fs[180] echo /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[180] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_export_fs[180] cut -d- -f2- +epprd_rg:cl_export_fs[181] tr , ' ' +epprd_rg:cl_export_fs[180] options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap root=epprdap ' +epprd_rg:cl_export_fs[184] : Each option can be of the format name=value, or just name. +epprd_rg:cl_export_fs[185] : We only care about the vers option. +epprd_rg:cl_export_fs[189] : Have we seen the vers 'option?' +epprd_rg:cl_export_fs[191] vers_missing=1 +epprd_rg:cl_export_fs[194] : Loop through all of the export options for this export. +epprd_rg:cl_export_fs[218] : If we did not find the vers option, then NFSv3 is the default. 
+epprd_rg:cl_export_fs[220] (( vers_missing )) +epprd_rg:cl_export_fs[220] export_v3=' /board_org /sapmnt/EPP' +epprd_rg:cl_export_fs[223] EXPORT_V3=' /board_org /sapmnt/EPP' +epprd_rg:cl_export_fs[224] EXPORT_V4='' +epprd_rg:cl_export_fs[227] /usr/sbin/bootinfo -K +epprd_rg:cl_sync_vgs(0.086):datavg[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.112):datavg[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.112):datavg.epprdaloglv[check_sync:221] PS4_LOOP=datavg.epprdaloglv +epprd_rg:cl_sync_vgs(0.112):datavg.epprdaloglv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.112):datavg.epprdaloglv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.112):datavg.epprdaloglv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.112):datavg.epprdaloglv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.112):datavg.epprdaloglv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.112):datavg.saplv[check_sync:221] PS4_LOOP=datavg.saplv +epprd_rg:cl_sync_vgs(0.112):datavg.saplv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.112):datavg.saplv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.112):datavg.saplv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.112):datavg.saplv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.112):datavg.saplv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.112):datavg.sapmntlv[check_sync:221] PS4_LOOP=datavg.sapmntlv +epprd_rg:cl_sync_vgs(0.112):datavg.sapmntlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.112):datavg.sapmntlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.112):datavg.sapmntlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.112):datavg.sapmntlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.112):datavg.sapmntlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.112):datavg.oraclelv[check_sync:221] PS4_LOOP=datavg.oraclelv +epprd_rg:cl_sync_vgs(0.112):datavg.oraclelv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.112):datavg.oraclelv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.112):datavg.oraclelv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.112):datavg.oraclelv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.112):datavg.oraclelv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.112):datavg.epplv[check_sync:221] PS4_LOOP=datavg.epplv +epprd_rg:cl_sync_vgs(0.112):datavg.epplv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.112):datavg.epplv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.112):datavg.epplv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.112):datavg.epplv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.112):datavg.epplv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.112):datavg.oraarchlv[check_sync:221] PS4_LOOP=datavg.oraarchlv +epprd_rg:cl_sync_vgs(0.112):datavg.oraarchlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.112):datavg.oraarchlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.112):datavg.oraarchlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.112):datavg.oraarchlv[check_sync:219] read lv_id lv_name lv_status 
+epprd_rg:cl_sync_vgs(0.112):datavg.oraarchlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata1lv[check_sync:221] PS4_LOOP=datavg.sapdata1lv +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata1lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata1lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata1lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata1lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata1lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata2lv[check_sync:221] PS4_LOOP=datavg.sapdata2lv +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata2lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_export_fs[227] KERNEL_BITS=64 +epprd_rg:cl_export_fs[229] subsystems='nfsd rpc.mountd' +epprd_rg:cl_export_fs[230] [[ -n '' ]] +epprd_rg:cl_export_fs[233] : Special processing for cross mounts of EFS keys +epprd_rg:cl_export_fs[234] : The overmount of /var/efs must be removed prior +epprd_rg:cl_export_fs[235] : to stopping or restarting NFS, since the SRC +epprd_rg:cl_export_fs[236] : operations will attempt to check the EFS enablement. +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata2lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata2lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata2lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata2lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata3lv[check_sync:221] PS4_LOOP=datavg.sapdata3lv +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata3lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata3lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata3lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata3lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata3lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata4lv[check_sync:221] PS4_LOOP=datavg.sapdata4lv +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata4lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata4lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata4lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata4lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.112):datavg.sapdata4lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.112):datavg.boardlv[check_sync:221] PS4_LOOP=datavg.boardlv +epprd_rg:cl_sync_vgs(0.112):datavg.boardlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.112):datavg.boardlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.112):datavg.boardlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.112):datavg.boardlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.112):datavg.boardlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.112):datavg.origlogAlv[check_sync:221] PS4_LOOP=datavg.origlogAlv +epprd_rg:cl_sync_vgs(0.112):datavg.origlogAlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.112):datavg.origlogAlv[check_sync:225] : Anything else indicates
no stale partitions +epprd_rg:cl_sync_vgs(0.112):datavg.origlogAlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.112):datavg.origlogAlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.112):datavg.origlogAlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.112):datavg.origlogBlv[check_sync:221] PS4_LOOP=datavg.origlogBlv +epprd_rg:cl_sync_vgs(0.112):datavg.origlogBlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.112):datavg.origlogBlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.113):datavg.origlogBlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.113):datavg.origlogBlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.113):datavg.origlogBlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.113):datavg.mirrlogAlv[check_sync:221] PS4_LOOP=datavg.mirrlogAlv +epprd_rg:cl_sync_vgs(0.113):datavg.mirrlogAlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.113):datavg.mirrlogAlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.113):datavg.mirrlogAlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.113):datavg.mirrlogAlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.113):datavg.mirrlogAlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.113):datavg.mirrlogBlv[check_sync:221] PS4_LOOP=datavg.mirrlogBlv +epprd_rg:cl_sync_vgs(0.113):datavg.mirrlogBlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.113):datavg.mirrlogBlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.113):datavg.mirrlogBlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.113):datavg.mirrlogBlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.113):datavg.mirrlogBlv[check_sync:268] [[ -n RG_MOVE ]] +epprd_rg:cl_sync_vgs(0.113):datavg.mirrlogBlv[check_sync:268] (( 0 == 1 )) +epprd_rg:cl_sync_vgs[355] exit 0 +epprd_rg:cl_export_fs[238] mount +epprd_rg:cl_export_fs[238] grep -w /var/efs +epprd_rg:cl_export_fs[238] mounted_info='' +epprd_rg:cl_export_fs[239] [[ -n '' ]] +epprd_rg:cl_export_fs[295] : Kill and restart everything in '"nfsd' 'rpc.mountd"' +epprd_rg:cl_export_fs[299] : Kill nfsd, and restart it below +epprd_rg:cl_export_fs[306] [[ nfsd == nfsd ]] +epprd_rg:cl_export_fs[307] [[ 64 == 64 ]] +epprd_rg:cl_export_fs[307] [[ -x /usr/sbin/nfs4smctl ]] +epprd_rg:cl_export_fs[308] [[ ! 
-s /etc/xtab ]] +epprd_rg:cl_export_fs[311] clcheck_server nfsd +epprd_rg:clcheck_server[118] [[ high == high ]] +epprd_rg:clcheck_server[118] version=1.10.4.2 +epprd_rg:clcheck_server[119] cl_get_path +epprd_rg:clcheck_server[119] HA_DIR=es +epprd_rg:clcheck_server[121] SERVER=nfsd +epprd_rg:clcheck_server[122] STATUS=0 +epprd_rg:clcheck_server[123] FATAL_ERROR=255 +epprd_rg:clcheck_server[124] retries=0 +epprd_rg:clcheck_server[124] typeset -li retries +epprd_rg:clcheck_server[126] [[ -n nfsd ]] +epprd_rg:clcheck_server[131] lssrc -s nfsd +epprd_rg:clcheck_server[131] LC_ALL=C +epprd_rg:clcheck_server[131] grep 'not on file' +epprd_rg:clcheck_server[131] wc -l +epprd_rg:clcheck_server[131] rc=' 0' +epprd_rg:clcheck_server[133] (( 0 == 1 )) +epprd_rg:clcheck_server[143] [[ 0 =~ 3 ]] +epprd_rg:clcheck_server[147] lssrc -s nfsd +epprd_rg:clcheck_server[147] 1> /dev/null 2> /dev/null +epprd_rg:clcheck_server[161] egrep 'stop|active' +epprd_rg:clcheck_server[161] lssrc -s nfsd +epprd_rg:clcheck_server[161] LC_ALL=C +epprd_rg:clcheck_server[161] check_if_down=' nfsd nfs 28377402 active' +epprd_rg:clcheck_server[166] [[ -z ' nfsd nfs 28377402 active' ]] +epprd_rg:clcheck_server[187] check_server_extended nfsd +epprd_rg:clcheck_server[check_server_extended:55] [[ high == high ]] +epprd_rg:clcheck_server[check_server_extended:55] set -x +epprd_rg:clcheck_server[check_server_extended:58] SERVER=nfsd +epprd_rg:clcheck_server[check_server_extended:58] typeset SERVER +epprd_rg:clcheck_server[check_server_extended:59] STATUS=1 +epprd_rg:clcheck_server[check_server_extended:59] typeset STATUS +epprd_rg:clcheck_server[check_server_extended:87] echo 1 +epprd_rg:clcheck_server[check_server_extended:88] return +epprd_rg:clcheck_server[187] STATUS=1 +epprd_rg:clcheck_server[188] return 1 +epprd_rg:cl_export_fs[329] : nfsv4 daemon not stopped due to existing mounts +epprd_rg:cl_export_fs[330] : Turn on NFSv4 grace periods and ignore any errors. +epprd_rg:cl_export_fs[332] chnfs -I -g on -x 1 +epprd_rg:cl_export_fs[332] ODMDIR=/etc/objrepos 0513-077 Subsystem has been changed. 0513-077 Subsystem has been changed. +epprd_rg:cl_export_fs[299] : Kill rpc.mountd, and restart it below +epprd_rg:cl_export_fs[306] [[ rpc.mountd == nfsd ]] +epprd_rg:cl_export_fs[336] : Friendly stop of rpc.mountd +epprd_rg:cl_export_fs[338] lssrc -s rpc.mountd +epprd_rg:cl_export_fs[338] LC_ALL=C +epprd_rg:cl_export_fs[338] tail +2 +epprd_rg:cl_export_fs[338] grep -qw active +epprd_rg:cl_export_fs[338] stopsrc -s rpc.mountd 0513-044 The rpc.mountd Subsystem was requested to stop. 
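
The stop of rpc.mountd requested above, and the wait loop traced next, follow a standard SRC shutdown pattern: stop the subsystem politely with stopsrc, poll lssrc until it reports inoperative (up to LIMIT tries, one second apart, matching the LIMIT=60 WAIT=1 initialization earlier in cl_export_fs), and only then fall back to a real kill. A condensed sketch of the pattern; the helper name is illustrative:

    stop_subsystem_sketch()
    {
        typeset subsys=$1
        typeset -li LIMIT=60 WAIT=1 TRY=0
        typeset subsys_state skip subsys_pid rest

        # only request a stop if SRC reports the subsystem active
        lssrc -s $subsys | tail +2 | grep -qw active && stopsrc -s $subsys

        # poll until the subsystem reports inoperative, up to LIMIT tries
        while (( TRY < LIMIT )) ; do
            subsys_state=$(LC_ALL=C lssrc -s $subsys | tail +2)
            print -- "$subsys_state" | grep -qw inoperative && break
            sleep $WAIT
            (( TRY+=1 ))
        done

        # if stopsrc failed to stop it, use a real kill on the daemon
        ps -eo comm,pid | grep -w $subsys | grep -vw grep | read skip subsys_pid rest
        [[ $subsys_pid == +([0-9]) ]] && kill -9 $subsys_pid
    }
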
+epprd_rg:cl_export_fs[341] : Now, wait for rpc.mountd to die +epprd_rg:cl_export_fs[343] (( TRY=0)) +epprd_rg:cl_export_fs[343] (( 0 < 60)) +epprd_rg:cl_export_fs[345] lssrc -s rpc.mountd +epprd_rg:cl_export_fs[345] LC_ALL=C +epprd_rg:cl_export_fs[345] tail +2 +epprd_rg:cl_export_fs[345] subsys_state=' rpc.mountd nfs inoperative' +epprd_rg:cl_export_fs[346] print -- ' rpc.mountd nfs inoperative' +epprd_rg:cl_export_fs[346] grep -qw inoperative +epprd_rg:cl_export_fs[348] [[ high == high ]] +epprd_rg:cl_export_fs[348] set -x +epprd_rg:cl_export_fs[349] subsys_state=inoperative +epprd_rg:cl_export_fs[350] break +epprd_rg:cl_export_fs[356] [[ high == high ]] +epprd_rg:cl_export_fs[356] set -x +epprd_rg:cl_export_fs[358] [[ inoperative != inoperative ]] +epprd_rg:cl_export_fs[382] : If stopsrc has failed to stop rpc.mountd, +epprd_rg:cl_export_fs[383] : use a real kill on the daemon +epprd_rg:cl_export_fs[385] grep -w rpc.mountd +epprd_rg:cl_export_fs[385] ps -eo comm,pid +epprd_rg:cl_export_fs[385] grep -vw grep +epprd_rg:cl_export_fs[385] read skip subsys_pid rest +epprd_rg:cl_export_fs[386] [[ '' == +([0-9]) ]] +epprd_rg:cl_export_fs[389] : If rpc.mountd has been stopped, +epprd_rg:cl_export_fs[390] : start it back up again. +epprd_rg:cl_export_fs[392] clcheck_server rpc.mountd +epprd_rg:clcheck_server[118] [[ high == high ]] +epprd_rg:clcheck_server[118] version=1.10.4.2 +epprd_rg:clcheck_server[119] cl_get_path +epprd_rg:clcheck_server[119] HA_DIR=es +epprd_rg:clcheck_server[121] SERVER=rpc.mountd +epprd_rg:clcheck_server[122] STATUS=0 +epprd_rg:clcheck_server[123] FATAL_ERROR=255 +epprd_rg:clcheck_server[124] retries=0 +epprd_rg:clcheck_server[124] typeset -li retries +epprd_rg:clcheck_server[126] [[ -n rpc.mountd ]] +epprd_rg:clcheck_server[131] lssrc -s rpc.mountd +epprd_rg:clcheck_server[131] LC_ALL=C +epprd_rg:clcheck_server[131] grep 'not on file' +epprd_rg:clcheck_server[131] wc -l +epprd_rg:clcheck_server[131] rc=' 0' +epprd_rg:clcheck_server[133] (( 0 == 1 )) +epprd_rg:clcheck_server[143] [[ 0 =~ 3 ]] +epprd_rg:clcheck_server[147] lssrc -s rpc.mountd +epprd_rg:clcheck_server[147] 1> /dev/null 2> /dev/null +epprd_rg:clcheck_server[161] lssrc -s rpc.mountd +epprd_rg:clcheck_server[161] LC_ALL=C +epprd_rg:clcheck_server[161] egrep 'stop|active' +epprd_rg:clcheck_server[161] check_if_down='' +epprd_rg:clcheck_server[166] [[ -z '' ]] +epprd_rg:clcheck_server[171] sleep 1 +epprd_rg:clcheck_server[172] lssrc -s rpc.mountd +epprd_rg:clcheck_server[172] LC_ALL=C +epprd_rg:clcheck_server[172] egrep 'stop|active' +epprd_rg:clcheck_server[172] check_if_down='' +epprd_rg:clcheck_server[173] [[ -z '' ]] +epprd_rg:clcheck_server[177] return 0 +epprd_rg:cl_export_fs[394] [[ rpc.mountd == nfsd ]] +epprd_rg:cl_export_fs[403] : Start rpc.mountd back up again +epprd_rg:cl_export_fs[405] startsrc -s rpc.mountd 0513-059 The rpc.mountd Subsystem has been started. Subsystem PID is 20054400. +epprd_rg:cl_export_fs[406] rc=0 +epprd_rg:cl_export_fs[407] (( 0 == 0 )) +epprd_rg:cl_export_fs[409] sleep 3 +epprd_rg:cl_export_fs[410] lssrc -s rpc.mountd +epprd_rg:cl_export_fs[410] LC_ALL=C +epprd_rg:cl_export_fs[410] tail +2 +epprd_rg:cl_export_fs[410] subsys_state=' rpc.mountd nfs 20054400 active' +epprd_rg:cl_export_fs[413] (( 0 != 0 )) +epprd_rg:cl_export_fs[413] print -- ' rpc.mountd nfs 20054400 active' +epprd_rg:cl_export_fs[413] grep -qw active +epprd_rg:cl_export_fs[431] : Set the NFSv4 nfsroot parameter. 
This must be set prior to any +epprd_rg:cl_export_fs[432] : NFS exports that use the exname option, and cannot be set to a new +epprd_rg:cl_export_fs[433] : value if any exname exports already exist. This is normally done +epprd_rg:cl_export_fs[434] : at IPL, but rc.nfs is not run at boot when HACMP is installed. +epprd_rg:cl_export_fs[436] [[ -n '' ]] +epprd_rg:cl_export_fs[438] hasrv='' +epprd_rg:cl_export_fs[440] [[ -z '' ]] +epprd_rg:cl_export_fs[442] query=name='STABLE_STORAGE_PATH AND group=epprd_rg' +epprd_rg:cl_export_fs[443] odmget -q name='STABLE_STORAGE_PATH AND group=epprd_rg' HACMPresource +epprd_rg:cl_export_fs[444] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_export_fs[443] STABLE_STORAGE_PATH='' +epprd_rg:cl_export_fs[447] [[ -z '' ]] +epprd_rg:cl_export_fs[449] STABLE_STORAGE_PATH=/var/adm/nfsv4.hacmp/epprd_rg +epprd_rg:cl_export_fs[452] [[ -z '' ]] +epprd_rg:cl_export_fs[454] query=name='STABLE_STORAGE_COOKIE AND group=epprd_rg' +epprd_rg:cl_export_fs[455] odmget -q name='STABLE_STORAGE_COOKIE AND group=epprd_rg' HACMPresource +epprd_rg:cl_export_fs[456] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_export_fs[455] STABLE_STORAGE_COOKIE='' +epprd_rg:cl_export_fs[459] [[ -n epprd_rg ]] +epprd_rg:cl_export_fs[461] odmget -q 'name = SERVICE_LABEL and group = epprd_rg' HACMPresource +epprd_rg:cl_export_fs[462] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:cl_export_fs[461] SERVICE_LABEL=epprd +epprd_rg:cl_export_fs[465] primary epprd +epprd_rg:cl_export_fs[primary:55] echo epprd +epprd_rg:cl_export_fs[465] primary=epprd +epprd_rg:cl_export_fs[466] secondary epprd +epprd_rg:cl_export_fs[secondary:74] [[ -n epprd ]] +epprd_rg:cl_export_fs[secondary:74] shift +epprd_rg:cl_export_fs[secondary:75] echo '' +epprd_rg:cl_export_fs[466] secondary='' +epprd_rg:cl_export_fs[468] nfs_node_state='' +epprd_rg:cl_export_fs[471] : Determine if grace periods are enabled +epprd_rg:cl_export_fs[473] ps -eo args +epprd_rg:cl_export_fs[473] grep -w nfsd +epprd_rg:cl_export_fs[473] grep -qw -- '-gp on' +epprd_rg:cl_export_fs[476] gp=off +epprd_rg:cl_export_fs[480] : We can use an NFSv4 node if grace periods are enabled, we are running a +epprd_rg:cl_export_fs[481] : 64-bit kernel, and the nfs4smctl command exists. +epprd_rg:cl_export_fs[483] [[ off == on ]] +epprd_rg:cl_export_fs[487] rm -f '/var/adm/nfsv4.hacmp/epprd_rg/*' +epprd_rg:cl_export_fs[487] 2> /dev/null +epprd_rg:cl_export_fs[491] : If we have NFSv4 exports, then we need to configure our NFS node so that +epprd_rg:cl_export_fs[492] : we can use stable storage. Note, NFS only supports this functionality in +epprd_rg:cl_export_fs[493] : its 64-bit kernels. +epprd_rg:cl_export_fs[495] [[ -n '' ]] +epprd_rg:cl_export_fs[580] [[ '' == acquiring ]] +epprd_rg:cl_export_fs[585] ALLEXPORTS=All_exports +epprd_rg:cl_export_fs[587] : update resource manager with this action +epprd_rg:cl_export_fs[589] cl_RMupdate resource_acquiring All_exports cl_export_fs 2023-01-28T18:03:52.627290 2023-01-28T18:03:52.631558 +epprd_rg:cl_export_fs[592] : Build a list of all filesystems that need to be exported, irrespective of +epprd_rg:cl_export_fs[593] : the protocol version. Since some filesystems may be exported with multiple +epprd_rg:cl_export_fs[594] : versions, remove any duplicates. 
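
The next trace lines implement that deduplication with tr and sort: the v3 and v4 lists are flattened to one filesystem per line and sort -u drops the repeats. As a standalone one-liner (variable names as in the trace; EXPORT_V4 is empty in this run):

    # one filesystem per line, duplicates removed, whatever the protocol version
    FILESYSTEM_LIST=$(echo $EXPORT_V3 $EXPORT_V4 | tr ' ' '\n' | sort -u)
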
+epprd_rg:cl_export_fs[596] echo /board_org /sapmnt/EPP +epprd_rg:cl_export_fs[596] tr ' ' '\n' +epprd_rg:cl_export_fs[596] sort -u +epprd_rg:cl_export_fs[596] FILESYSTEM_LIST=$'/board_org\n/sapmnt/EPP' +epprd_rg:cl_export_fs[599] : Loop through all of the filesystems we need to export ... +epprd_rg:cl_export_fs[603] v3='' +epprd_rg:cl_export_fs[604] v4='' +epprd_rg:cl_export_fs[605] root=epprd:epprda:epprds +epprd_rg:cl_export_fs[606] new_options='' +epprd_rg:cl_export_fs[607] export_file_line='' +epprd_rg:cl_export_fs[608] USING_EXPORTS_FILE=0 +epprd_rg:cl_export_fs[609] export_lines='' +epprd_rg:cl_export_fs[610] otheroption='' +epprd_rg:cl_export_fs[613] : Get the export line from exportfs for this export +epprd_rg:cl_export_fs[615] exportfs +epprd_rg:cl_export_fs[615] grep '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_export_fs[615] export_line='' +epprd_rg:cl_export_fs[617] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[620] : Get the export file for the filesystem from the exports file. +epprd_rg:cl_export_fs[621] : Only look at the part of the line preceding comments. +epprd_rg:cl_export_fs[624] getline_exports /board_org +epprd_rg:cl_export_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_export_fs[getline_exports:45] line='' +epprd_rg:cl_export_fs[getline_exports:45] typeset line +epprd_rg:cl_export_fs[getline_exports:46] flag=0 +epprd_rg:cl_export_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_export_fs[getline_exports:47] fs=/board_org +epprd_rg:cl_export_fs[getline_exports:49] [[ -z /board_org ]] +epprd_rg:cl_export_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_export_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_export_fs[getline_exports:69] (( 1 == 0 )) +epprd_rg:cl_export_fs[getline_exports:74] [[ 0 == 1 ]] +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:89] return 0 +epprd_rg:cl_export_fs[625] export_file_line='' +epprd_rg:cl_export_fs[628] : If the administrator provides an entry for the filesystem in the +epprd_rg:cl_export_fs[629] : exports file then ignore the root option that was passed in on the +epprd_rg:cl_export_fs[630] : command line. +epprd_rg:cl_export_fs[632] [[ -n '' ]] +epprd_rg:cl_export_fs[636] : If the filesystem currently is not exported, then get the options from +epprd_rg:cl_export_fs[637] : the exports file. We will merge these options with options specified +epprd_rg:cl_export_fs[638] : through resource group attributes to produce the actual options we will +epprd_rg:cl_export_fs[639] : provide to exportfs. 
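
Each export ultimately funnels into one exportfs call. Options are accumulated with a leading comma, so the comma is stripped with cut before the call, and -i tells exportfs to take the options from the command line rather than the system /etc/exports, exactly as traced for /board_org below. A minimal sketch of that final step, where fs stands for the filesystem being exported; the empty-options fallback is an assumption, since this trace only shows the non-empty path:

    # new_options was built as ",opt1,opt2,..." - drop the leading comma
    new_options=$(echo $new_options | cut -d, -f2-)

    if [[ -z $new_options ]] ; then
        exportfs -i $fs                  # assumed: export with default options
    else
        exportfs -i -o $new_options $fs  # -i: bypass /etc/exports, use these options
    fi
    RC=$?
    (( RC != 0 )) && STATUS=1            # record the failure and keep going
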
+epprd_rg:cl_export_fs[641] [[ -z '' ]] +epprd_rg:cl_export_fs[643] export_line='' +epprd_rg:cl_export_fs[644] USING_EXPORTS_FILE=1 +epprd_rg:cl_export_fs[648] : In case of multiple exports for same filesystem +epprd_rg:cl_export_fs[649] : Process them line by line +epprd_rg:cl_export_fs[651] set +u +epprd_rg:cl_export_fs[652] oldifs=$' \t\n' +epprd_rg:cl_export_fs[653] IFS=$'\n' +epprd_rg:cl_export_fs[653] export_lines=( ) +epprd_rg:cl_export_fs[654] IFS=$' \t\n' +epprd_rg:cl_export_fs[656] [ -n '' ] +epprd_rg:cl_export_fs[733] set -u +epprd_rg:cl_export_fs[736] : At this point, v3 and v4 are set based on what is actually exported +epprd_rg:cl_export_fs[737] : or what is configured to be exported in the exports file. +epprd_rg:cl_export_fs[740] (( USING_EXPORTS_FILE )) +epprd_rg:cl_export_fs[742] v3='' +epprd_rg:cl_export_fs[743] v4='' +epprd_rg:cl_export_fs[747] : At this point, v3 and v4 are set based on what is actually exported. +epprd_rg:cl_export_fs[748] : Now add additional versions if the resource group has them configured. +epprd_rg:cl_export_fs[752] [[ /board_org == /board_org ]] +epprd_rg:cl_export_fs[752] v3=:2:3 +epprd_rg:cl_export_fs[752] break +epprd_rg:cl_export_fs[761] : Versions 2 and 3 are the default versions. Some versions of AIX do +epprd_rg:cl_export_fs[762] : not support the vers export option, so only use the option if we are +epprd_rg:cl_export_fs[763] : exporting a non-default value such as 4 +epprd_rg:cl_export_fs[765] [[ -n '' ]] +epprd_rg:cl_export_fs[779] [[ -n epprd:epprda:epprds ]] +epprd_rg:cl_export_fs[782] : If we have root privileged clients, +epprd_rg:cl_export_fs[783] : then add them to the option list. +epprd_rg:cl_export_fs[785] new_options=,root=epprd:epprda:epprds +epprd_rg:cl_export_fs[788] [[ -n '' ]] +epprd_rg:cl_export_fs[798] : Strip off the leading comma +epprd_rg:cl_export_fs[800] echo ,root=epprd:epprda:epprds +epprd_rg:cl_export_fs[800] cut -d, -f2- +epprd_rg:cl_export_fs[800] new_options=root=epprd:epprda:epprds +epprd_rg:cl_export_fs[802] [[ -z root=epprd:epprda:epprds ]] +epprd_rg:cl_export_fs[811] : Exporting filesystem /board_org with options root=epprd:epprda:epprds +epprd_rg:cl_export_fs[813] exportfs -i -o root=epprd:epprda:epprds /board_org +epprd_rg:cl_export_fs[814] RC=0 +epprd_rg:cl_export_fs[817] (( 0 != 0 )) +epprd_rg:cl_export_fs[603] v3='' +epprd_rg:cl_export_fs[604] v4='' +epprd_rg:cl_export_fs[605] root=epprd:epprda:epprds +epprd_rg:cl_export_fs[606] new_options='' +epprd_rg:cl_export_fs[607] export_file_line='' +epprd_rg:cl_export_fs[608] USING_EXPORTS_FILE=0 +epprd_rg:cl_export_fs[609] export_lines='' +epprd_rg:cl_export_fs[610] otheroption='' +epprd_rg:cl_export_fs[613] : Get the export line from exportfs for this export +epprd_rg:cl_export_fs[615] exportfs +epprd_rg:cl_export_fs[615] grep '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_export_fs[615] export_line='' +epprd_rg:cl_export_fs[617] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[620] : Get the export file for the filesystem from the exports file. +epprd_rg:cl_export_fs[621] : Only look at the part of the line preceding comments.
+epprd_rg:cl_export_fs[624] getline_exports /sapmnt/EPP +epprd_rg:cl_export_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_export_fs[getline_exports:45] line='' +epprd_rg:cl_export_fs[getline_exports:45] typeset line +epprd_rg:cl_export_fs[getline_exports:46] flag=0 +epprd_rg:cl_export_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_export_fs[getline_exports:47] fs=/sapmnt/EPP +epprd_rg:cl_export_fs[getline_exports:49] [[ -z /sapmnt/EPP ]] +epprd_rg:cl_export_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_export_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:68] grep -q '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_export_fs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_export_fs[getline_exports:71] flag=1 +epprd_rg:cl_export_fs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_export_fs[getline_exports:76] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:76] grep -w '\\' +epprd_rg:cl_export_fs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_export_fs[getline_exports:81] cl_exports_data=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:82] break +epprd_rg:cl_export_fs[getline_exports:89] return 0 +epprd_rg:cl_export_fs[625] export_file_line=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[628] : If the administrator provides an entry for the filesystem in the +epprd_rg:cl_export_fs[629] : exports file then ignore the root option that was passed in on the +epprd_rg:cl_export_fs[630] : command line. +epprd_rg:cl_export_fs[632] [[ -n ' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' ]] +epprd_rg:cl_export_fs[632] root='' +epprd_rg:cl_export_fs[636] : If the filesystem currently is not exported, then get the options from +epprd_rg:cl_export_fs[637] : the exports file. We will merge these options with options specified +epprd_rg:cl_export_fs[638] : through resource group attributes to produce the actual options we will +epprd_rg:cl_export_fs[639] : provide to exportfs. 
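
For /sapmnt/EPP the exports file does supply an entry, so the loop traced next merges its options with the resource-group attributes: each old option is split out of the comma-separated list, root= is captured separately (the exports-file value overrides the one passed on the command line), and every other option is appended once, keyed by the name left of the equals sign. Condensed into a sketch, with old_options, root, otheroption and new_options as in the trace:

    # root may already hold the command-line client list; the exports file wins
    new_options=''
    for opt in $old_options ; do
        case $opt in
            root=*)     # capture the root list; it is re-appended last
                root=$(echo $opt | cut -d= -f2-)
                ;;
            *)          # merge each remaining option, skipping duplicates by name
                otheroption=$(echo $opt | cut -d= -f1)
                [[ $new_options == *${otheroption}* ]] && continue
                new_options=$new_options,$opt
                ;;
        esac
    done
    [[ -n $root ]] && new_options=$new_options,root=$root
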
+epprd_rg:cl_export_fs[641] [[ -z '' ]] +epprd_rg:cl_export_fs[643] export_line=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[644] USING_EXPORTS_FILE=1 +epprd_rg:cl_export_fs[648] : In case of multiple exports for same filesystem +epprd_rg:cl_export_fs[649] : Process them line by line +epprd_rg:cl_export_fs[651] set +u +epprd_rg:cl_export_fs[652] oldifs=$' \t\n' +epprd_rg:cl_export_fs[653] IFS=$'\n' +epprd_rg:cl_export_fs[653] export_lines=( ' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' ) +epprd_rg:cl_export_fs[654] IFS=$' \t\n' +epprd_rg:cl_export_fs[656] [ -n ' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' ] +epprd_rg:cl_export_fs[661] : The line is of the format: filesystem -option1,option2,... +epprd_rg:cl_export_fs[662] : This will give 'option1 option2 ...' +epprd_rg:cl_export_fs[664] echo /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[664] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_export_fs[664] cut -d- -f2- +epprd_rg:cl_export_fs[665] tr , ' ' +epprd_rg:cl_export_fs[664] old_options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap root=epprdap ' +epprd_rg:cl_export_fs[668] : Each option can be of the format name=value, or just name. +epprd_rg:cl_export_fs[669] : We care about the hasrv, vers, and root options. +epprd_rg:cl_export_fs[673] : Loop through all of the export options for this export. +epprd_rg:cl_export_fs[700] [[ sec=sys:krb5p:krb5i:krb5:dh == -* ]] +epprd_rg:cl_export_fs[708] [[ sec=sys:krb5p:krb5i:krb5:dh == /sapmnt/EPP ]] +epprd_rg:cl_export_fs[716] echo sec=sys:krb5p:krb5i:krb5:dh +epprd_rg:cl_export_fs[716] cut -d= -f 1 +epprd_rg:cl_export_fs[716] otheroption=sec +epprd_rg:cl_export_fs[717] [[ '' == *sec* ]] +epprd_rg:cl_export_fs[724] : Merge in all remaining options. +epprd_rg:cl_export_fs[726] new_options=,sec=sys:krb5p:krb5i:krb5:dh +epprd_rg:cl_export_fs[700] [[ rw == -* ]] +epprd_rg:cl_export_fs[708] [[ rw == /sapmnt/EPP ]] +epprd_rg:cl_export_fs[716] echo rw +epprd_rg:cl_export_fs[716] cut -d= -f 1 +epprd_rg:cl_export_fs[716] otheroption=rw +epprd_rg:cl_export_fs[717] [[ ,sec=sys:krb5p:krb5i:krb5:dh == *rw* ]] +epprd_rg:cl_export_fs[724] : Merge in all remaining options. +epprd_rg:cl_export_fs[726] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw +epprd_rg:cl_export_fs[700] [[ access=epprdap == -* ]] +epprd_rg:cl_export_fs[708] [[ access=epprdap == /sapmnt/EPP ]] +epprd_rg:cl_export_fs[716] echo access=epprdap +epprd_rg:cl_export_fs[716] cut -d= -f 1 +epprd_rg:cl_export_fs[716] otheroption=access +epprd_rg:cl_export_fs[717] [[ ,sec=sys:krb5p:krb5i:krb5:dh,rw == *access* ]] +epprd_rg:cl_export_fs[724] : Merge in all remaining options. +epprd_rg:cl_export_fs[726] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap +epprd_rg:cl_export_fs[695] : Merge in the root option. +epprd_rg:cl_export_fs[697] echo root=epprdap +epprd_rg:cl_export_fs[697] cut -d= -f2- +epprd_rg:cl_export_fs[697] root=epprdap +epprd_rg:cl_export_fs[733] set -u +epprd_rg:cl_export_fs[736] : At this point, v3 and v4 are set based on what is actually exported +epprd_rg:cl_export_fs[737] : or what is configured to be exported in the exports file. +epprd_rg:cl_export_fs[740] (( USING_EXPORTS_FILE )) +epprd_rg:cl_export_fs[742] v3='' +epprd_rg:cl_export_fs[743] v4='' +epprd_rg:cl_export_fs[747] : At this point, v3 and v4 are set based on what is actually exported. 
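
The version handling that follows is deliberately conservative. v3 and v4 collect the protocol versions configured for the filesystem (here v3 becomes :2:3); because versions 2 and 3 are the NFS defaults and some AIX levels reject an explicit vers= export option, the option is only emitted when a non-default version such as 4 is present. A sketch of that decision; how the shipped script joins the two lists into the vers= value is not visible in this trace, so the cut step below is an assumption:

    # v3=":2:3" for NFSv2/v3 exports, v4=":4" when NFSv4 is also configured
    if [[ -n $v4 ]] ; then
        # assumed join: ":2:3" + ":4" -> "2:3:4"
        vers_list=$(echo ${v3}${v4} | cut -d: -f2-)
        new_options=$new_options,vers=$vers_list
    fi
    # with only v2/v3, emit no vers= option at all and rely on the server default
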
+epprd_rg:cl_export_fs[748] : Now add additional versions if the resource group has them configured. +epprd_rg:cl_export_fs[752] [[ /sapmnt/EPP == /board_org ]] +epprd_rg:cl_export_fs[752] [[ /sapmnt/EPP == /sapmnt/EPP ]] +epprd_rg:cl_export_fs[752] v3=:2:3 +epprd_rg:cl_export_fs[752] break +epprd_rg:cl_export_fs[761] : Versions 2 and 3 are the default versions. Some versions of AIX do +epprd_rg:cl_export_fs[762] : not support the vers export option, so only use the option if we are +epprd_rg:cl_export_fs[763] : exporting a non-default value such as 4 +epprd_rg:cl_export_fs[765] [[ -n '' ]] +epprd_rg:cl_export_fs[779] [[ -n epprdap ]] +epprd_rg:cl_export_fs[782] : If we have root privileged clients, +epprd_rg:cl_export_fs[783] : then add them to the option list. +epprd_rg:cl_export_fs[785] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[788] [[ -n '' ]] +epprd_rg:cl_export_fs[798] : Strip off the leading comma +epprd_rg:cl_export_fs[800] echo ,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[800] cut -d, -f2- +epprd_rg:cl_export_fs[800] new_options=sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[802] [[ -z sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap ]] +epprd_rg:cl_export_fs[811] : Exporting filesystem /sapmnt/EPP with options sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[813] exportfs -i -o sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap /sapmnt/EPP +epprd_rg:cl_export_fs[814] RC=0 +epprd_rg:cl_export_fs[817] (( 0 != 0 )) +epprd_rg:cl_export_fs[834] ALLNOERREXPORT=All_nonerror_exports +epprd_rg:cl_export_fs[836] : update resource manager with results +epprd_rg:cl_export_fs[838] cl_RMupdate resource_up All_nonerror_exports cl_export_fs 2023-01-28T18:03:52.748964 2023-01-28T18:03:52.753232 +epprd_rg:cl_export_fs[840] exit 0 +epprd_rg:process_resources(13.750)[export_filesystems:1662] RC=0 +epprd_rg:process_resources(13.750)[export_filesystems:1663] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources(13.750)[export_filesystems:1669] (( 0 != 0 )) +epprd_rg:process_resources(13.750)[export_filesystems:1675] return 0 +epprd_rg:process_resources(13.750)[3324] true +epprd_rg:process_resources(13.750)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(13.750)[3328] set -a +epprd_rg:process_resources(13.750)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:03:52.766320 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(13.763)[3329] eval JOB_TYPE=TELINIT +epprd_rg:process_resources(13.763)[1] JOB_TYPE=TELINIT +epprd_rg:process_resources(13.763)[3330] RC=0 +epprd_rg:process_resources(13.763)[3331] set +a +epprd_rg:process_resources(13.763)[3333] (( 0 != 0 )) +epprd_rg:process_resources(13.763)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(13.763)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(13.763)[3343] export GROUPNAME +epprd_rg:process_resources(13.763)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(13.763)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(13.763)[3360] [[ TELINIT == RELEASE ]] +epprd_rg:process_resources(13.763)[3360] [[ TELINIT == ONLINE ]] +epprd_rg:process_resources(13.763)[3435] cl_telinit +epprd_rg:cl_telinit[178] version=%I%
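
With the exports done, process_resources drops back into the dispatch cycle that drives this whole log: call clRGPA, eval the KEY=VALUE assignments it prints (under set -a so they are exported), and branch on JOB_TYPE; this pass returned TELINIT, so cl_telinit runs next. The driver reduces to a sketch like the following; the NONE terminator is an assumption, since only EXPORT_FILESYSTEMS, TELINIT and MOUNT_FILESYSTEMS jobs appear in this section:

    while true ; do
        : call rgpa, and it will tell us what to do next
        set -a                         # export everything the eval defines
        eval $(clRGPA)                 # e.g. JOB_TYPE=TELINIT, or a full job line
        RC=$?
        set +a
        (( RC != 0 )) && break

        case $JOB_TYPE in
            EXPORT_FILESYSTEMS) [[ $ACTION == ACQUIRE ]] && export_filesystems ;;
            MOUNT_FILESYSTEMS)  [[ $ACTION == ACQUIRE ]] && mount_nfs_filesystems MOUNT ;;
            TELINIT)            cl_telinit ;;
            NONE)               break ;;   # assumed end-of-work sentinel
        esac
    done
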
+epprd_rg:cl_telinit[182] TELINIT_FILE=/usr/es/sbin/cluster/.telinit +epprd_rg:cl_telinit[183] USE_TELINIT_FILE=/usr/es/sbin/cluster/.use_telinit +epprd_rg:cl_telinit[185] [[ -f /usr/es/sbin/cluster/.use_telinit ]] +epprd_rg:cl_telinit[189] USE_TELINIT=0 +epprd_rg:cl_telinit[198] [[ '' == -boot ]] +epprd_rg:cl_telinit[236] cl_lsitab clinit +epprd_rg:cl_telinit[236] 1> /dev/null 2>& 1 +epprd_rg:cl_telinit[239] : telinit a disabled +epprd_rg:cl_telinit[241] return 0 +epprd_rg:process_resources(13.784)[3324] true +epprd_rg:process_resources(13.784)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(13.784)[3328] set -a +epprd_rg:process_resources(13.784)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:03:52.799892 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(13.796)[3329] eval JOB_TYPE=MOUNT_FILESYSTEMS ACTION=ACQUIRE FILE_SYSTEMS='"/board;/board_org"' RESOURCE_GROUPS='"epprd_rg' '"' NFS_NETWORKS='""' NFS_HOSTS='""' IP_LABELS='"epprd"' +epprd_rg:process_resources(13.796)[1] JOB_TYPE=MOUNT_FILESYSTEMS +epprd_rg:process_resources(13.796)[1] ACTION=ACQUIRE +epprd_rg:process_resources(13.796)[1] FILE_SYSTEMS='/board;/board_org' +epprd_rg:process_resources(13.796)[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources(13.796)[1] NFS_NETWORKS='' +epprd_rg:process_resources(13.796)[1] NFS_HOSTS='' +epprd_rg:process_resources(13.796)[1] IP_LABELS=epprd +epprd_rg:process_resources(13.796)[3330] RC=0 +epprd_rg:process_resources(13.796)[3331] set +a +epprd_rg:process_resources(13.796)[3333] (( 0 != 0 )) +epprd_rg:process_resources(13.796)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(13.796)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(13.796)[3343] export GROUPNAME +epprd_rg:process_resources(13.796)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(13.796)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(13.796)[3360] [[ MOUNT_FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources(13.797)[3360] [[ MOUNT_FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources(13.797)[3612] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources(13.797)[3614] mount_nfs_filesystems MOUNT +epprd_rg:process_resources(13.797)[mount_nfs_filesystems:1437] PS4_FUNC=mount_nfs_filesystems +epprd_rg:process_resources(13.797)[mount_nfs_filesystems:1437] typeset PS4_FUNC +epprd_rg:process_resources(13.797)[mount_nfs_filesystems:1438] [[ high == high ]] +epprd_rg:process_resources(13.797)[mount_nfs_filesystems:1438] set -x +epprd_rg:process_resources(13.797)[mount_nfs_filesystems:1440] post_event_member=FALSE +epprd_rg:process_resources(13.797)[mount_nfs_filesystems:1444] [[ epprda == epprda ]] +epprd_rg:process_resources(13.797)[mount_nfs_filesystems:1446] post_event_member=TRUE +epprd_rg:process_resources(13.797)[mount_nfs_filesystems:1447] break +epprd_rg:process_resources(13.797)[mount_nfs_filesystems:1452] : This node will not be in the resource group so do not mount filesystems. 
+epprd_rg:process_resources(13.797)[mount_nfs_filesystems:1454] [[ TRUE == FALSE ]] +epprd_rg:process_resources(13.797)[mount_nfs_filesystems:1459] STAT=0 +epprd_rg:process_resources(13.797)[mount_nfs_filesystems:1463] export GROUPNAME +epprd_rg:process_resources(13.798)[mount_nfs_filesystems:1465] get_list_head '/board;/board_org' +epprd_rg:process_resources(13.798)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(13.798)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(13.798)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(13.798)[get_list_head:60] set -x +epprd_rg:process_resources(13.799)[get_list_head:61] echo '/board;/board_org' +epprd_rg:process_resources(13.801)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(13.801)[get_list_head:61] IFS=: +epprd_rg:process_resources(13.802)[get_list_head:62] echo '/board;/board_org' +epprd_rg:process_resources(13.803)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(13.801)[mount_nfs_filesystems:1465] read LIST_OF_FILE_SYSTEMS_FOR_RG +epprd_rg:process_resources(13.808)[mount_nfs_filesystems:1466] get_list_tail '/board;/board_org' +epprd_rg:process_resources(13.808)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(13.808)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(13.808)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(13.809)[get_list_tail:68] set -x +epprd_rg:process_resources(13.809)[get_list_tail:69] echo '/board;/board_org' +epprd_rg:process_resources(13.812)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(13.812)[get_list_tail:69] IFS=: +epprd_rg:process_resources(13.812)[get_list_tail:70] echo +epprd_rg:process_resources(13.811)[mount_nfs_filesystems:1466] read FILE_SYSTEMS +epprd_rg:process_resources(13.814)[mount_nfs_filesystems:1468] get_list_head +epprd_rg:process_resources(13.814)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(13.814)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(13.814)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(13.814)[get_list_head:60] set -x +epprd_rg:process_resources(13.815)[get_list_head:61] echo +epprd_rg:process_resources(13.817)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(13.817)[get_list_head:61] IFS=: +epprd_rg:process_resources(13.818)[get_list_head:62] echo +epprd_rg:process_resources(13.820)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(13.817)[mount_nfs_filesystems:1468] read NFS_HOST +epprd_rg:process_resources(13.825)[mount_nfs_filesystems:1469] get_list_tail +epprd_rg:process_resources(13.825)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(13.825)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(13.825)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(13.825)[get_list_tail:68] set -x +epprd_rg:process_resources(13.826)[get_list_tail:69] echo +epprd_rg:process_resources(13.827)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(13.827)[get_list_tail:69] IFS=: +epprd_rg:process_resources(13.828)[get_list_tail:70] echo +epprd_rg:process_resources(13.829)[mount_nfs_filesystems:1469] read NFS_HOSTS +epprd_rg:process_resources(13.830)[mount_nfs_filesystems:1471] get_list_head +epprd_rg:process_resources(13.830)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(13.830)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(13.830)[get_list_head:60] [[ high == high 
]] +epprd_rg:process_resources(13.830)[get_list_head:60] set -x +epprd_rg:process_resources(13.831)[get_list_head:61] echo +epprd_rg:process_resources(13.833)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(13.833)[get_list_head:61] IFS=: +epprd_rg:process_resources(13.834)[get_list_head:62] echo +epprd_rg:process_resources(13.835)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(13.833)[mount_nfs_filesystems:1471] read NFS_NETWORK +epprd_rg:process_resources(13.838)[mount_nfs_filesystems:1472] get_list_tail +epprd_rg:process_resources(13.839)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(13.839)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(13.839)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(13.839)[get_list_tail:68] set -x +epprd_rg:process_resources(13.840)[get_list_tail:69] echo +epprd_rg:process_resources(13.843)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(13.843)[get_list_tail:69] IFS=: +epprd_rg:process_resources(13.843)[get_list_tail:70] echo +epprd_rg:process_resources(13.843)[mount_nfs_filesystems:1472] read NFS_NETWORKS +epprd_rg:process_resources(13.846)[mount_nfs_filesystems:1474] get_list_head epprd +epprd_rg:process_resources(13.846)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(13.846)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(13.846)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(13.846)[get_list_head:60] set -x +epprd_rg:process_resources(13.848)[get_list_head:61] echo epprd +epprd_rg:process_resources(13.848)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(13.848)[get_list_head:61] IFS=: +epprd_rg:process_resources(13.850)[get_list_head:62] echo epprd +epprd_rg:process_resources(13.851)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(13.847)[mount_nfs_filesystems:1474] read LIST_OF_IP_LABELS_FOR_RG +epprd_rg:process_resources(13.856)[mount_nfs_filesystems:1475] get_list_tail epprd +epprd_rg:process_resources(13.857)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(13.857)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(13.857)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(13.857)[get_list_tail:68] set -x +epprd_rg:process_resources(13.859)[get_list_tail:69] echo epprd +epprd_rg:process_resources(13.858)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(13.858)[get_list_tail:69] IFS=: +epprd_rg:process_resources(13.860)[get_list_tail:70] echo +epprd_rg:process_resources(13.857)[mount_nfs_filesystems:1475] read IP_LABELS +epprd_rg:process_resources(13.861)[mount_nfs_filesystems:1477] MOUNT_FILESYSTEM='/board;/board_org' +epprd_rg:process_resources(13.861)[mount_nfs_filesystems:1478] NFSMOUNT_LABEL=epprd +epprd_rg:process_resources(13.861)[mount_nfs_filesystems:1481] : Do the required NFS_mounts. 
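
All of the argument parsing just traced leans on two small helpers. clRGPA passes each attribute as a colon-separated list with one entry per resource group, and each entry is itself comma-separated: get_list_head returns the first group's entry with commas turned into spaces, and get_list_tail returns the remainder for the next iteration. Reconstructed from the traced bodies:

    get_list_head()
    {
        typeset listhead listtail
        echo $* | IFS=: read listhead listtail  # split at the first colon
        echo $listhead | tr , ' '               # commas separate items within one RG
    }

    get_list_tail()
    {
        typeset listhead listtail
        echo $* | IFS=: read listhead listtail
        echo $listtail                          # the remaining RGs' entries
    }

Callers consume them the way the trace shows, for example: get_list_head "$FILE_SYSTEMS" | read LIST_OF_FILE_SYSTEMS_FOR_RG.
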
+epprd_rg:process_resources(13.861)[mount_nfs_filesystems:1484] NW_NFSMOUNT_LABEL='' +epprd_rg:process_resources(13.861)[mount_nfs_filesystems:1486] [[ -z '' ]] +epprd_rg:process_resources(13.861)[mount_nfs_filesystems:1488] NFS_HOST=epprda +epprd_rg:process_resources(13.861)[mount_nfs_filesystems:1491] NFSHOST='' +epprd_rg:process_resources(13.861)[mount_nfs_filesystems:1492] [[ -n epprda ]] +epprd_rg:process_resources(13.861)[mount_nfs_filesystems:1494] [[ -n '' ]] +epprd_rg:process_resources(13.861)[mount_nfs_filesystems:1516] [[ MOUNT == REMOUNT ]] +epprd_rg:process_resources(13.861)[mount_nfs_filesystems:1526] ping epprd 1024 1 +epprd_rg:process_resources(13.862)[mount_nfs_filesystems:1526] 1> /dev/null +epprd_rg:process_resources(13.865)[mount_nfs_filesystems:1528] NFSHOST=epprd +epprd_rg:process_resources(13.865)[mount_nfs_filesystems:1529] break +epprd_rg:process_resources(13.865)[mount_nfs_filesystems:1533] [[ -n epprd ]] +epprd_rg:process_resources(13.865)[mount_nfs_filesystems:1536] : activate_nfs will not wait for the mounts to complete +epprd_rg:process_resources(13.866)[mount_nfs_filesystems:1538] cl_activate_nfs 1 epprd '/board;/board_org' +epprd_rg:cl_activate_nfs[68] [[ high == high ]] +epprd_rg:cl_activate_nfs[68] version='1.19.4.2 $Source$' +epprd_rg:cl_activate_nfs[70] . /usr/es/sbin/cluster/events/utils/cl_nfs_utils +epprd_rg:cl_activate_nfs[98] PROGNAME=cl_activate_nfs +epprd_rg:cl_activate_nfs[99] [[ high == high ]] +epprd_rg:cl_activate_nfs[101] set -x +epprd_rg:cl_activate_nfs[102] version=%I +epprd_rg:cl_activate_nfs[105] cl_exports_data='' +epprd_rg:cl_activate_nfs[105] typeset cl_exports_data +epprd_rg:cl_activate_nfs[106] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[72] set -u +epprd_rg:cl_activate_nfs[242] grep -w ^MOUNT_WLMCNTRL_SELFMANAGE /etc/environment +epprd_rg:cl_activate_nfs[242] export eval +epprd_rg:cl_activate_nfs[244] (( 3 < 3 )) +epprd_rg:cl_activate_nfs[253] ATTEMPTS=1 +epprd_rg:cl_activate_nfs[253] typeset -li ATTEMPTS +epprd_rg:cl_activate_nfs[254] HOST=epprd +epprd_rg:cl_activate_nfs[256] shift 2 +epprd_rg:cl_activate_nfs[261] FILELIST='/board;/board_org' +epprd_rg:cl_activate_nfs[266] print '/board;/board_org' +epprd_rg:cl_activate_nfs[266] grep -q '\;/' +epprd_rg:cl_activate_nfs[271] CROSSMOUNTS=TRUE +epprd_rg:cl_activate_nfs[272] print '/board;/board_org' +epprd_rg:cl_activate_nfs[272] /bin/sort -k 1,1 '-t;' +epprd_rg:cl_activate_nfs[272] tr ' ' '\n' +epprd_rg:cl_activate_nfs[272] MOUNTLIST='/board;/board_org' +epprd_rg:cl_activate_nfs[281] ALLNFS=All_nfs_mounts +epprd_rg:cl_activate_nfs[282] cl_RMupdate resource_acquiring All_nfs_mounts cl_activate_nfs 2023-01-28T18:03:52.920692 2023-01-28T18:03:52.924969 +epprd_rg:cl_activate_nfs[288] odmget -q name='RECOVERY_METHOD AND group=epprd_rg' HACMPresource +epprd_rg:cl_activate_nfs[289] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_activate_nfs[288] METHOD=sequential +epprd_rg:cl_activate_nfs[291] odmget -q name='EXPORT_FILESYSTEM AND group=epprd_rg' HACMPresource +epprd_rg:cl_activate_nfs[291] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_activate_nfs[291] EXPORT_FILESYSTEM=$'/board_org\n/sapmnt/EPP' +epprd_rg:cl_activate_nfs[293] odmget -q name='EXPORT_FILESYSTEM_V4 AND group=epprd_rg' HACMPresource +epprd_rg:cl_activate_nfs[293] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_activate_nfs[293] EXPORT_FILESYSTEM_V4='' +epprd_rg:cl_activate_nfs[302] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[304] [[ -z '' ]] 
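
Before handing off to cl_activate_nfs, mount_nfs_filesystems probes for a reachable NFS server: a single 1024-byte ping (AIX ping's "Host PacketSize Count" form) against each candidate service label, keeping the first responder. A sketch of that selection, using the variable names from the trace; the iteration over multiple labels is assumed, since only one label (epprd) is configured here:

    NFSHOST=''
    for label in $NFSMOUNT_LABEL            # here just "epprd"
    do
        if ping $label 1024 1 > /dev/null   # one 1024-byte packet
        then
            NFSHOST=$label                  # first reachable server wins
            break
        fi
    done

Only when NFSHOST ends up non-empty does the script proceed, and per its own comment it does not wait for the mounts to complete.
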
+epprd_rg:cl_activate_nfs[305] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_activate_nfs[308] VERSION_SOURCE=FILES +epprd_rg:cl_activate_nfs[320] [[ FILES == FILES ]] +epprd_rg:cl_activate_nfs[322] export_v3='' +epprd_rg:cl_activate_nfs[323] export_v4='' +epprd_rg:cl_activate_nfs[330] getline_exports /board_org +epprd_rg:cl_activate_nfs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_activate_nfs[getline_exports:45] line='' +epprd_rg:cl_activate_nfs[getline_exports:45] typeset line +epprd_rg:cl_activate_nfs[getline_exports:46] flag=0 +epprd_rg:cl_activate_nfs[getline_exports:46] typeset -i flag +epprd_rg:cl_activate_nfs[getline_exports:47] fs=/board_org +epprd_rg:cl_activate_nfs[getline_exports:49] [[ -z /board_org ]] +epprd_rg:cl_activate_nfs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_activate_nfs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[getline_exports:56] read -r line +epprd_rg:cl_activate_nfs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_activate_nfs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_activate_nfs[getline_exports:69] (( 1 == 0 )) +epprd_rg:cl_activate_nfs[getline_exports:74] [[ 0 == 1 ]] +epprd_rg:cl_activate_nfs[getline_exports:56] read -r line +epprd_rg:cl_activate_nfs[getline_exports:89] return 0 +epprd_rg:cl_activate_nfs[331] export_line='' +epprd_rg:cl_activate_nfs[336] echo +epprd_rg:cl_activate_nfs[336] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_activate_nfs[337] cut -d- -f2- +epprd_rg:cl_activate_nfs[337] tr , ' ' +epprd_rg:cl_activate_nfs[336] options='' +epprd_rg:cl_activate_nfs[343] vers_missing=1 +epprd_rg:cl_activate_nfs[366] (( vers_missing )) +epprd_rg:cl_activate_nfs[366] export_v3=' /board_org' +epprd_rg:cl_activate_nfs[330] getline_exports /sapmnt/EPP +epprd_rg:cl_activate_nfs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_activate_nfs[getline_exports:45] line='' +epprd_rg:cl_activate_nfs[getline_exports:45] typeset line +epprd_rg:cl_activate_nfs[getline_exports:46] flag=0 +epprd_rg:cl_activate_nfs[getline_exports:46] typeset -i flag +epprd_rg:cl_activate_nfs[getline_exports:47] fs=/sapmnt/EPP +epprd_rg:cl_activate_nfs[getline_exports:49] [[ -z /sapmnt/EPP ]] +epprd_rg:cl_activate_nfs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_activate_nfs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[getline_exports:56] read -r line +epprd_rg:cl_activate_nfs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_activate_nfs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:68] grep -q 
'^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_activate_nfs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_activate_nfs[getline_exports:71] flag=1 +epprd_rg:cl_activate_nfs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_activate_nfs[getline_exports:76] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:76] grep -w '\\' +epprd_rg:cl_activate_nfs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_activate_nfs[getline_exports:81] cl_exports_data=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:82] break +epprd_rg:cl_activate_nfs[getline_exports:89] return 0 +epprd_rg:cl_activate_nfs[331] export_line=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[336] echo /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_activate_nfs[336] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_activate_nfs[337] cut -d- -f2- +epprd_rg:cl_activate_nfs[337] tr , ' ' +epprd_rg:cl_activate_nfs[336] options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap root=epprdap ' +epprd_rg:cl_activate_nfs[343] vers_missing=1 +epprd_rg:cl_activate_nfs[366] (( vers_missing )) +epprd_rg:cl_activate_nfs[366] export_v3=' /board_org /sapmnt/EPP' +epprd_rg:cl_activate_nfs[369] EXPORT_FILESYSTEM=' /board_org /sapmnt/EPP' +epprd_rg:cl_activate_nfs[370] EXPORT_FILESYSTEM_V4='' +epprd_rg:cl_activate_nfs[377] [[ -x /usr/sbin/nfsrgyd ]] +epprd_rg:cl_activate_nfs[378] [[ -n '' ]] +epprd_rg:cl_activate_nfs[379] grep -q vers=4 /etc/filesystems +epprd_rg:cl_activate_nfs[394] [[ TRUE == TRUE ]] +epprd_rg:cl_activate_nfs[411] filesystem=/board_org +epprd_rg:cl_activate_nfs[412] mountpoint=/board +epprd_rg:cl_activate_nfs:/board;/board_org[429] PS4_LOOP='/board;/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[430] [[ sequential == sequential ]] +epprd_rg:cl_activate_nfs:/board;/board_org[432] nfs_mount 1 epprd /board_org /board +epprd_rg:cl_activate_nfs(0.114):/board;/board_org[nfs_mount:99] (( 4 != 4 )) +epprd_rg:cl_activate_nfs(0.114):/board;/board_org[nfs_mount:108] LIMIT=1 +epprd_rg:cl_activate_nfs(0.114):/board;/board_org[nfs_mount:108] typeset -li LIMIT +epprd_rg:cl_activate_nfs(0.114):/board;/board_org[nfs_mount:109] HOST=epprd +epprd_rg:cl_activate_nfs(0.114):/board;/board_org[nfs_mount:110] FileSystem=/board_org +epprd_rg:cl_activate_nfs(0.114):/board;/board_org[nfs_mount:111] MountPoint=/board +epprd_rg:cl_activate_nfs(0.115):/board;/board_org[nfs_mount:116] mount +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:119] [[ mounted == /board ]] +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:119] [[ --------------- == /board ]] +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] 
+epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:119] [[ procfs == /board ]] +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:119] [[ ahafs == /board ]] +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:119] [[ /sapcd == /board ]] +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.117):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] 
+epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:126] vers='' +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:127] [[ FILES == ODM ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:141] lsfs -c -v nfs +epprd_rg:cl_activate_nfs(0.122):/board;/board_org[nfs_mount:141] grep ^/board: +epprd_rg:cl_activate_nfs(0.124):/board;/board_org[nfs_mount:141] cut -d: -f7 +epprd_rg:cl_activate_nfs(0.127):/board;/board_org[nfs_mount:141] OPTIONS=bg,soft,intr,sec=sys,rw +epprd_rg:cl_activate_nfs(0.129):/board;/board_org[nfs_mount:142] echo bg,soft,intr,sec=sys,rw +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:142] sed s/+/:/g +epprd_rg:cl_activate_nfs(0.133):/board;/board_org[nfs_mount:142] OPTIONS=bg,soft,intr,sec=sys,rw +epprd_rg:cl_activate_nfs(0.133):/board;/board_org[nfs_mount:144] [[ -z bg,soft,intr,sec=sys,rw ]] +epprd_rg:cl_activate_nfs(0.134):/board;/board_org[nfs_mount:152] print bg,soft,intr,sec=sys,rw +epprd_rg:cl_activate_nfs(0.135):/board;/board_org[nfs_mount:152] grep -q intr +epprd_rg:cl_activate_nfs(0.137):/board;/board_org[nfs_mount:168] [[ -n '' ]] +epprd_rg:cl_activate_nfs(0.137):/board;/board_org[nfs_mount:175] [[ sequential == sequential ]] +epprd_rg:cl_activate_nfs(0.139):/board;/board_org[nfs_mount:177] print bg,soft,intr,sec=sys,rw +epprd_rg:cl_activate_nfs(0.140):/board;/board_org[nfs_mount:177] sed s/bg/fg/g +epprd_rg:cl_activate_nfs(0.143):/board;/board_org[nfs_mount:177] OPTIONS=fg,soft,intr,sec=sys,rw +epprd_rg:cl_activate_nfs(0.143):/board;/board_org[nfs_mount:178] let LIMIT+=4 +epprd_rg:cl_activate_nfs(0.143):/board;/board_org[nfs_mount:184] typeset RC +epprd_rg:cl_activate_nfs(0.143):/board;/board_org[nfs_mount:186] amlog_trace '' 'Activating NFS|/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:319] cltime +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:319] DATE=2023-01-28T18:03:53.046472 +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:320] echo '|2023-01-28T18:03:53.046472|INFO: Activating NFS|/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_nfs(0.172):/board;/board_org[nfs_mount:187] (( TRIES=0)) +epprd_rg:cl_activate_nfs(0.172):/board;/board_org[nfs_mount:187] (( TRIES<LIMIT)) +epprd_rg:cl_activate_nfs:/board;/board_org[nfs_mount] amlog_trace '' 'Activating NFS|/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:318] 1> /dev/null 2>& 1
+epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:319] cltime +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:319] DATE=2023-01-28T18:04:03.095921 +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:320] echo '|2023-01-28T18:04:03.095921|INFO: Activating NFS|/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_nfs(10.222):/board;/board_org[nfs_mount:203] return 0 +epprd_rg:process_resources(24.092)[mount_nfs_filesystems:1540] RC=0 +epprd_rg:process_resources(24.092)[mount_nfs_filesystems:1541] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources(24.092)[mount_nfs_filesystems:1549] (( 0 != 0 )) +epprd_rg:process_resources(24.092)[mount_nfs_filesystems:1565] return 0 +epprd_rg:process_resources(24.092)[3324] true +epprd_rg:process_resources(24.092)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(24.092)[3328] set -a +epprd_rg:process_resources(24.092)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:04:03.108967 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(24.105)[3329] eval JOB_TYPE=NONE +epprd_rg:process_resources(24.105)[1] JOB_TYPE=NONE +epprd_rg:process_resources(24.105)[3330] RC=0 +epprd_rg:process_resources(24.105)[3331] set +a +epprd_rg:process_resources(24.105)[3333] (( 0 != 0 )) +epprd_rg:process_resources(24.105)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(24.105)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(24.105)[3343] export GROUPNAME +epprd_rg:process_resources(24.105)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(24.105)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(24.105)[3360] [[ NONE == RELEASE ]] +epprd_rg:process_resources(24.105)[3360] [[ NONE == ONLINE ]] +epprd_rg:process_resources(24.105)[3729] break +epprd_rg:process_resources(24.105)[3740] : If sddsrv was turned off above, turn it back on again +epprd_rg:process_resources(24.105)[3742] [[ FALSE == TRUE ]] +epprd_rg:process_resources(24.105)[3747] exit 0 :rg_move[247] : unsetting AM_SYNC_CALLED_BY from $'callers environment as\n: we dont' require it after this point in execution. :rg_move[250] unset AM_SYNC_CALLED_BY :rg_move[253] [[ -f /tmp/.NFSSTOPPED ]] :rg_move[274] [[ -f /tmp/.RPCLOCKDSTOPPED ]] :rg_move[276] rm -f /tmp/.RPCLOCKDSTOPPED :rg_move[277] ATTEMPT=0 :rg_move[277] typeset -li ATTEMPT :rg_move[278] (( ATTEMPT++ < 60 )) :rg_move[280] : rpc.lockd status check :rg_move[281] lssrc -s rpc.lockd :rg_move[281] LC_ALL=C :rg_move[281] grep stopping :rg_move[282] (( 1 == 0 )) :rg_move[282] break :rg_move[285] startsrc -s rpc.lockd 0513-059 The rpc.lockd Subsystem has been started. Subsystem PID is 26214856. 
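
Worth unpacking from the nfs_mount stretch above: the function pulls the mount options for /board out of /etc/filesystems (lsfs -c prints colon-delimited records; field 7 is the options string), and because this resource group's RECOVERY_METHOD is sequential it rewrites bg to fg so the mount runs in the foreground, raising the retry limit to compensate. A condensed sketch; the mount invocation itself is assumed, since that part of the trace between the two amlog_trace calls is not preserved:

    # Options recorded for the mount point, e.g. bg,soft,intr,sec=sys,rw
    OPTIONS=$(lsfs -c -v nfs | grep "^${MountPoint}:" | cut -d: -f7)
    OPTIONS=$(echo $OPTIONS | sed s/+/:/g)          # '+' separators -> ':'

    if [[ $METHOD == sequential ]]                  # RECOVERY_METHOD from HACMPresource
    then
        OPTIONS=$(echo $OPTIONS | sed s/bg/fg/g)    # mount in the foreground
        let LIMIT+=4                                # allow a few extra attempts
    fi

    for (( TRIES=0; TRIES<LIMIT; TRIES++ ))
    do
        # Assumed form of each attempt; the successful one took ~10s here
        mount -o $OPTIONS ${HOST}:${FileSystem} $MountPoint && break
    done

The (10.222) stamp on the return 0 above shows the whole nfs_mount call, foreground mount included, took about ten seconds.
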
:rg_move[286] rcstartsrc=0 :rg_move[287] (( 0 != 0 )) :rg_move[293] exit 0 Jan 28 2023 18:04:03 EVENT COMPLETED: rg_move epprda 1 ACQUIRE 0 |2023-01-28T18:04:03|8561|EVENT COMPLETED: rg_move epprda 1 ACQUIRE 0| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T18:04:03.225096 :clevlog[amlog_trace:320] echo '|2023-01-28T18:04:03.225096|INFO: rg_move|epprd_rg|epprda|1|ACQUIRE|0' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :rg_move_acquire[+119] exit_status=0 :rg_move_acquire[+120] : exit status of clcallev rg_move epprda 1 ACQUIRE is: 0 :rg_move_acquire[+121] exit 0 Jan 28 2023 18:04:03 EVENT COMPLETED: rg_move_acquire epprda 1 0 |2023-01-28T18:04:03|8561|EVENT COMPLETED: rg_move_acquire epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:04:03.349660 + echo '|2023-01-28T18:04:03.349660|INFO: rg_move_acquire|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 18:04:03 EVENT START: rg_move_complete epprda 1 |2023-01-28T18:04:03|8561|EVENT START: rg_move_complete epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:04:03.543820 + echo '|2023-01-28T18:04:03.543820|INFO: rg_move_complete|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. :get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move_complete[91] version=%I% :rg_move_complete[97] STATUS=0 :rg_move_complete[97] typeset -li STATUS :rg_move_complete[99] [[ -z '' ]] :rg_move_complete[101] EMULATE=REAL :rg_move_complete[104] set -u :rg_move_complete[106] (( 2 < 2 || 2 > 3 )) :rg_move_complete[112] NODENAME=epprda :rg_move_complete[112] export NODENAME :rg_move_complete[113] RGID=1 :rg_move_complete[114] (( 2 == 3 )) :rg_move_complete[118] RGDESTINATION='' :rg_move_complete[122] : serial number for this event is 8561 :rg_move_complete[126] : Interpret resource group ID into a resource group name. :rg_move_complete[128] clodmget -qid=1 -f group -n HACMPgroup :rg_move_complete[128] eval RGNAME=epprd_rg :rg_move_complete[1] RGNAME=epprd_rg +epprd_rg:rg_move_complete[129] GROUPNAME=epprd_rg +epprd_rg:rg_move_complete[131] UPDATESTATD=0 +epprd_rg:rg_move_complete[131] typeset -li UPDATESTATD +epprd_rg:rg_move_complete[132] NFSSTOPPED=0 +epprd_rg:rg_move_complete[132] typeset -li NFSSTOPPED +epprd_rg:rg_move_complete[133] LIMIT=60 +epprd_rg:rg_move_complete[133] WAIT=1 +epprd_rg:rg_move_complete[133] TRY=0 +epprd_rg:rg_move_complete[133] typeset -li LIMIT WAIT TRY +epprd_rg:rg_move_complete[136] : If this is a two node cluster and exported filesystems exist, then +epprd_rg:rg_move_complete[137] : when the cluster topology is stable notify rpc.statd of the changes. 
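
rg_move_complete first gates on a two-node cluster with exported filesystems (the clnodename | wc -l and EXPORT_FILESYSTEM queries below), then calls cl_update_statd to refresh rpc.statd's "twin", the peer that must be notified for NFS lock recovery after a takeover. Condensed from the trace that follows; the re-registration branch is assumed, as this run only unregisters:

    # Make sure statd is running and registered with the portmapper
    if lssrc -s statd | LC_ALL=C grep -qw inoperative ||
       ! rpcinfo -p | LC_ALL=C grep -qw status
    then
        startsrc -s statd        # assumed recovery path; not taken in this trace
    fi

    # Current twin, if any (here: epprds)
    CURTWIN=$(nfso -H sm_gethost 2>&1)

    if [[ -n $CURTWIN ]]
    then
        nfso -H sm_unregister $CURTWIN   # drop the stale twin registration
    fi
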
+epprd_rg:rg_move_complete[139] clnodename +epprd_rg:rg_move_complete[139] wc -l +epprd_rg:rg_move_complete[139] (( 2 == 2 )) +epprd_rg:rg_move_complete[141] clodmget -f group -n HACMPgroup +epprd_rg:rg_move_complete[141] RESOURCE_GROUPS=epprd_rg +epprd_rg:rg_move_complete[144] clodmget -q group='epprd_rg AND name=EXPORT_FILESYSTEM' -f value -n HACMPresource +epprd_rg:rg_move_complete[144] EXPORTLIST=$'/board_org\n/sapmnt/EPP' +epprd_rg:rg_move_complete[146] [[ -n $'/board_org\n/sapmnt/EPP' ]] +epprd_rg:rg_move_complete[146] [[ epprd_rg == epprd_rg ]] +epprd_rg:rg_move_complete[148] UPDATESTATD=1 +epprd_rg:rg_move_complete[149] [[ REAL == EMUL ]] +epprd_rg:rg_move_complete[154] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+196] rpcinfo -p :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw status :cl_update_statd(0)[+207] : Get the current twin, if there is one :cl_update_statd(0)[+209] :cl_update_statd(0)[+209] nfso -H sm_gethost :cl_update_statd(0)[+209] 2>& 1 CURTWIN=epprds :cl_update_statd(0)[+210] RC=0 :cl_update_statd(0)[+212] [[ -z true ]] :cl_update_statd(0)[+212] [[ -z ]] :cl_update_statd(0)[+215] : Local node is no longer a cluster member, unregister its twin :cl_update_statd(0)[+215] [[ -n epprds ]] :cl_update_statd(0)[+219] nfso -H sm_unregister epprds :cl_update_statd(0)[+220] RC=0 :cl_update_statd(0)[+259] : RC is actually 0 :cl_update_statd(0)[+266] return 0 +epprd_rg:rg_move_complete[155] (( 0 != 0 )) +epprd_rg:rg_move_complete[160] break +epprd_rg:rg_move_complete[166] : Set the RESOURCE_GROUPS environment variable with the names +epprd_rg:rg_move_complete[167] : of all resource groups participating in this event, and export +epprd_rg:rg_move_complete[168] : them to all successive scripts. 
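
The long repetitive run below is rg_move_complete recycling rpc.lockd so lock state is rebuilt after the move: stop the subsystem, poll lssrc once per second (up to 60 tries) until it is no longer reported as stopping, then start it again. Reduced to its shape in ksh; the trace shows twenty one-second iterations before the state field comes back empty:

    stopsrc -s rpc.lockd

    for (( TRY=0; TRY<60; TRY++ ))
    do
        lssrc -s rpc.lockd | LC_ALL=C tail -1 | read name subsystem pid state
        [[ -z $state ]] && break    # no trailing field: no longer 'stopping'
        sleep 1
    done

    startsrc -s rpc.lockd

rg_move used the same pattern a page earlier, there only waiting out an in-flight stop before its own startsrc.
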
+epprd_rg:rg_move_complete[170] set -a +epprd_rg:rg_move_complete[171] clsetenvgrp epprda rg_move_complete epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move_complete epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_complete[171] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_complete[172] RC=0 +epprd_rg:rg_move_complete[173] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_complete[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_complete[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_complete[3] HOMELESS_GROUPS='' +epprd_rg:rg_move_complete[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_complete[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_complete[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_complete[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_complete[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_complete[8] SIBLING_GROUPS='' +epprd_rg:rg_move_complete[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_complete[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_complete[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[174] set +a +epprd_rg:rg_move_complete[175] (( 0 != 0 )) +epprd_rg:rg_move_complete[182] : For each participating resource group, serially process the resources. +epprd_rg:rg_move_complete[251] (( 1 == 1 )) +epprd_rg:rg_move_complete[253] [[ REAL == EMUL ]] +epprd_rg:rg_move_complete[259] stopsrc -s rpc.lockd 0513-044 The rpc.lockd Subsystem was requested to stop. +epprd_rg:rg_move_complete[260] rcstopsrc=0 +epprd_rg:rg_move_complete[261] (( 0 != 0 )) +epprd_rg:rg_move_complete[266] (( TRY=0)) +epprd_rg:rg_move_complete[266] (( 0<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 1<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! 
-z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 2<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 3<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 4<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 5<60)) +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 6<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 7<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 8<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 9<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 10<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! 
-z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 11<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 12<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 13<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 14<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 15<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 16<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 17<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 18<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 19<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! 
-z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 20<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z '' ]] +epprd_rg:rg_move_complete[273] break +epprd_rg:rg_move_complete[277] [[ ! -z '' ]] +epprd_rg:rg_move_complete[300] : Sure that rpc.lockd stopped. Restart it. +epprd_rg:rg_move_complete[302] startsrc -s rpc.lockd 0513-059 The rpc.lockd Subsystem has been started. Subsystem PID is 22217092. +epprd_rg:rg_move_complete[303] rcstartsrc=0 +epprd_rg:rg_move_complete[304] (( 0 != 0 )) +epprd_rg:rg_move_complete[365] : If the resource group in this rg_move is now homeless, +epprd_rg:rg_move_complete[366] : then we need to put it into an error state. +epprd_rg:rg_move_complete[368] active_node=0 +epprd_rg:rg_move_complete[428] : If the resource group in this rg_move is now homeless_secondary, +epprd_rg:rg_move_complete[429] : then we need to put it into an errorsecondary state. +epprd_rg:rg_move_complete[437] : Set an error state for concurrent groups that have +epprd_rg:rg_move_complete[438] : been brought offline on this node by rg_move. +epprd_rg:rg_move_complete[453] AM_SYNC_CALLED_BY=RG_MOVE_COMPLETE +epprd_rg:rg_move_complete[453] export AM_SYNC_CALLED_BY +epprd_rg:rg_move_complete[454] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-01-28T18:04:23.846159 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=SYNC_VGS ACTION=ACQUIRE VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' :process_resources[1] JOB_TYPE=SYNC_VGS :process_resources[1] ACTION=ACQUIRE :process_resources[1] VOLUME_GROUPS=datavg :process_resources[1] RESOURCE_GROUPS='epprd_rg ' :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 )) :process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ SYNC_VGS == RELEASE ]] +epprd_rg:process_resources[3360] [[ SYNC_VGS == ONLINE ]] +epprd_rg:process_resources[3474] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[3476] sync_volume_groups +epprd_rg:process_resources[sync_volume_groups:2699] PS4_FUNC=sync_volume_groups +epprd_rg:process_resources[sync_volume_groups:2699] typeset PS4_FUNC +epprd_rg:process_resources[sync_volume_groups:2700] [[ high == high ]] +epprd_rg:process_resources[sync_volume_groups:2700] set -x +epprd_rg:process_resources[sync_volume_groups:2701] STAT=0 +epprd_rg:process_resources[sync_volume_groups:2704] export GROUPNAME +epprd_rg:process_resources[sync_volume_groups:2706] get_list_head datavg +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC 
+epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo datavg +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo datavg +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[sync_volume_groups:2706] read LIST_OF_VOLUME_GROUPS_FOR_RG +epprd_rg:process_resources[sync_volume_groups:2707] get_list_tail datavg +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo datavg +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[sync_volume_groups:2707] read VOLUME_GROUPS +epprd_rg:process_resources[sync_volume_groups:2710] : Sync the active volume groups +epprd_rg:process_resources[sync_volume_groups:2712] lsvg -L -o +epprd_rg:process_resources[sync_volume_groups:2712] sort +epprd_rg:process_resources[sync_volume_groups:2712] 2> /tmp/lsvg.err +epprd_rg:process_resources[sync_volume_groups:2712] 1> /tmp/lsvg.out.26214862 +epprd_rg:process_resources[sync_volume_groups:2713] echo datavg +epprd_rg:process_resources[sync_volume_groups:2713] tr ' ' '\n' +epprd_rg:process_resources[sync_volume_groups:2714] sort +epprd_rg:process_resources[sync_volume_groups:2714] comm -12 /tmp/lsvg.out.26214862 - +epprd_rg:process_resources[sync_volume_groups:2716] cl_sync_vgs datavg +epprd_rg:process_resources[sync_volume_groups:2718] [[ -s /tmp/lsvg.err ]] +epprd_rg:process_resources[sync_volume_groups:2723] rm -f /tmp/lsvg.out.26214862 /tmp/lsvg.err +epprd_rg:cl_sync_vgs[303] version=1.24.1.4 +epprd_rg:cl_sync_vgs[306] (( 1 == 0 )) +epprd_rg:cl_sync_vgs[312] : syncing 4 stale PPs at a time seems to be a win most of the time, but +epprd_rg:cl_sync_vgs[313] : we honor the NUM_PARALLEL_LPS value from /etc/environment, as does +epprd_rg:cl_sync_vgs[314] : syncvg. +epprd_rg:cl_sync_vgs[316] syncflag='' +epprd_rg:cl_sync_vgs[316] export syncflag +epprd_rg:cl_sync_vgs[317] PS4_LOOP='' +epprd_rg:cl_sync_vgs[317] export PS4_LOOP +epprd_rg:cl_sync_vgs[318] typeset -i npl +epprd_rg:cl_sync_vgs[319] grep -q ^NUM_PARALLEL_LPS= /etc/environment +epprd_rg:process_resources[sync_volume_groups:2732] unset AM_SYNC_CALLED_BY +epprd_rg:process_resources[sync_volume_groups:2734] return 0 +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:cl_sync_vgs[321] syncflag=-P4 +epprd_rg:cl_sync_vgs[328] echo 'NOTE: While the sync is going on, volume group can be used' NOTE: While the sync is going on, volume group can be used +epprd_rg:cl_sync_vgs[331] : For GLVM volume groups, read PARALLEL LPS count from HACMPresource if it is set from GUI, +epprd_rg:cl_sync_vgs[332] : else, read from environment variables, if it is not set use 32 as default value. 
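
cl_sync_vgs, whose trace interleaves with the clRGPA traffic below, decides whether anything in datavg needs a resync. It honors NUM_PARALLEL_LPS from /etc/environment for the syncvg parallelism (defaulting to 4 stale PPs at a time, hence syncflag=-P4), then walks lqueryvg output: an LV status of 2, 3, 5 or 7 means dirty and/or stale partitions. A sketch of the check; the env-var extraction and the syncvg invocation are assumed from the surrounding comments, and none of the LVs here turn out stale, so no sync actually runs:

    typeset -i npl=4                        # default: 4 stale PPs at a time
    grep -q '^NUM_PARALLEL_LPS=' /etc/environment &&
        npl=$(sed -n 's/^NUM_PARALLEL_LPS=//p' /etc/environment)
    syncflag=-P$npl

    vgid=$(getlvodm -v $vg_name)            # VGID, as in check_sync:89
    lqueryvg -g $vgid -L | while read lv_id lv_name lv_status
    do
        case $lv_status in
            2|3|5|7) syncvg $syncflag -l $lv_name ;;  # stale/dirty: resync
            *)       ;;                               # anything else: no stale partitions
        esac
    done

check_sync also greps the lsvg -L -p output (visible below) for disks in missing or removed state, which would force a different recovery path before any sync.
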
+epprd_rg:cl_sync_vgs[334] clodmget -q name='GMVG_REP_RESOURCE and value=datavg' -f group HACMPresource +epprd_rg:cl_sync_vgs[334] 2> /dev/null +epprd_rg:cl_sync_vgs[334] glvm_rg='' +epprd_rg:cl_sync_vgs[335] [[ -n '' ]] +epprd_rg:cl_sync_vgs[353] check_sync datavg +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:76] typeset vg_name +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:77] typeset vgid +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:78] typeset disklist +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:79] typeset lv_name +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:80] typeset -li stale_count +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:81] typeset -li mode +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:82] RC=0 +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:82] typeset -li RC +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:83] typeset site_node_list +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:84] typeset site_choice +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:86] vg_name=datavg +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:87] disklist='' +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:89] getlvodm -v datavg +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:04:23.932940 clrgpa +epprd_rg:cl_sync_vgs(0.035):datavg[check_sync:89] vgid=00c44af100004b00000001851e9dc053 +epprd_rg:cl_sync_vgs(0.035):datavg[check_sync:92] : find disks in the VG that LVM thinks are inaccessable +epprd_rg:cl_sync_vgs(0.035):datavg[check_sync:94] lsvg -L -p datavg +epprd_rg:cl_sync_vgs(0.036):datavg[check_sync:94] LC_ALL=C +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=APPLICATIONS ACTION=ACQUIRE ALL_APPLICATIONS='"epprd_app"' RESOURCE_GROUPS='"epprd_rg' '"' MISCDATA='""' +epprd_rg:process_resources[1] JOB_TYPE=APPLICATIONS +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] ALL_APPLICATIONS=epprd_app +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] MISCDATA='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ APPLICATIONS == RELEASE ]] +epprd_rg:process_resources[3360] [[ APPLICATIONS == ONLINE ]] +epprd_rg:process_resources[3549] process_applications ACQUIRE +epprd_rg:process_resources[process_applications:312] PS4_FUNC=process_applications +epprd_rg:process_resources[process_applications:312] typeset PS4_FUNC +epprd_rg:process_resources[process_applications:313] [[ high == high ]] +epprd_rg:process_resources[process_applications:313] set -x +epprd_rg:process_resources[process_applications:316] : Each subprocess will log to a file with this name and PID +epprd_rg:process_resources[process_applications:318] TMP_FILE=/var/hacmp/log/.process_resources_applications.26214862 +epprd_rg:process_resources[process_applications:318] export TMP_FILE +epprd_rg:process_resources[process_applications:320] rm -f '/var/hacmp/log/.process_resources_applications*' +epprd_rg:process_resources[process_applications:322] WAITPIDS='' 
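
In parallel, process_applications finishes setting up its per-run bookkeeping (temp file, wait-PID list, failure counters) and makes a single ROHA acquisition call for the whole application list, clmanageroha -o acquire -s -l epprd_app, before any server starts. Its option handling, reconstructed from the getopts traces below (optstring :cso:l:t; the -c handler is not exercised in this log):

    typeset -C roha_session     # ksh93 compound variable, as in the trace

    while getopts :cso:l:t opt
    do
        case $opt in
            o) roha_session.operation=$OPTARG ;;     # acquire|release|adjust
            s) roha_session.systemmirror_mode=1 ;;   # called by SystemMirror
            l) roha_session.optimal_apps=$OPTARG ;;  # application server list
            t) online_rgs_skip=1 ;;                  # skip online resource groups
        esac
    done

The operation is then validated against @(acquire|release|adjust) before the session proceeds.
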
+epprd_rg:process_resources[process_applications:323] LPAR_ACQUIRE_FAILED=0 +epprd_rg:process_resources[process_applications:324] LPAR_RELEASE_FAILED=0 +epprd_rg:process_resources[process_applications:325] START_STOP_FAILED=0 +epprd_rg:process_resources[process_applications:326] LIST_OF_APPS=epprd_app +epprd_rg:process_resources[process_applications:329] : Acquire lpar resources in one-shot before starting applications +epprd_rg:process_resources[process_applications:331] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[process_applications:333] GROUPNAME=epprd_rg +epprd_rg:process_resources[process_applications:333] export GROUPNAME +epprd_rg:process_resources[process_applications:334] clmanageroha -o acquire -s -l epprd_app +epprd_rg:process_resources[process_applications:334] 3>& 2 +epprd_rg:clmanageroha[318] : version='@(#)' 5881272 43haes/usr/sbin/cluster/events/clmanageroha.sh, 61aha_r726, 2205A_aha726, May 16 2022 12:15 PM +epprd_rg:clmanageroha[321] clodmget -n -f connection_type HACMPhmcparam +epprd_rg:clmanageroha[321] CONN_TYPE=0 +epprd_rg:clmanageroha[321] typeset -i CONN_TYPE +epprd_rg:clmanageroha[323] clodmget -q name='epprda and object like POWERVS_*' -nf name HACMPnode +epprd_rg:clmanageroha[323] 2> /dev/null +epprd_rg:clmanageroha[323] [[ -n '' ]] +epprd_rg:clmanageroha[326] export CONN_TYPE +epprd_rg:clmanageroha[331] roha_session_open -o acquire -s -l epprd_app +epprd_rg:clmanageroha[roha_session_open:131] roha_session.id=18743582 +epprd_rg:clmanageroha[roha_session_open:132] date +epprd_rg:clmanageroha[roha_session_open:132] LC_ALL=C +epprd_rg:clmanageroha[roha_session_open:132] roha_session_log 'Open session 18743582 at Sat Jan 28 18:04:24 KORST 2023' [ROHALOG:18743582:(0.073)] Open session 18743582 at Sat Jan 28 18:04:24 KORST 2023 +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:146] roha_session.operation=acquire +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:143] roha_session.systemmirror_mode=1 +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:149] roha_session.optimal_apps=epprd_app +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:163] [[ acquire != @(acquire|release|adjust) ]] +epprd_rg:clmanageroha[roha_session_open:168] no_roha_apps=0 +epprd_rg:clmanageroha[roha_session_open:168] typeset -i no_roha_apps +epprd_rg:clmanageroha[roha_session_open:169] need_explicit_res_rel=0 +epprd_rg:clmanageroha[roha_session_open:169] typeset -i need_explicit_res_rel +epprd_rg:clmanageroha[roha_session_open:187] [[ -n epprd_app ]] +epprd_rg:clmanageroha[roha_session_open:187] clmgr q roha +epprd_rg:clmanageroha[roha_session_open:187] sort +epprd_rg:clmanageroha[roha_session_open:187] uniq -d +epprd_rg:cl_sync_vgs(0.202):datavg[check_sync:94] disklist=$'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.204):datavg[check_sync:95] print -- $'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 
40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.208):datavg[check_sync:95] grep -w missing +epprd_rg:cl_sync_vgs(0.212):datavg[check_sync:95] cut -f1 '-d ' +epprd_rg:cl_sync_vgs(0.225):datavg[check_sync:95] missing_disklist='' +epprd_rg:cl_sync_vgs(0.227):datavg[check_sync:96] print -- $'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.231):datavg[check_sync:96] grep -w removed +epprd_rg:cl_sync_vgs(0.235):datavg[check_sync:96] cut -f1 '-d ' +epprd_rg:cl_sync_vgs(0.247):datavg[check_sync:96] removed_disklist='' +epprd_rg:cl_sync_vgs(0.247):datavg[check_sync:100] : Proceeed if there are some disks that LVM thinks are inaccessable +epprd_rg:cl_sync_vgs(0.247):datavg[check_sync:102] [[ -n '' ]] +epprd_rg:cl_sync_vgs(0.247):datavg[check_sync:102] [[ -n '' ]] +epprd_rg:cl_sync_vgs(0.247):datavg[check_sync:196] : sync if any LVs in the VG that have stale partitions +epprd_rg:cl_sync_vgs(0.247):datavg[check_sync:198] (( 0 == 0 )) +epprd_rg:cl_sync_vgs(0.247):datavg[check_sync:201] : A status of 2,3,5 or 7 indicates the presence of dirty and/or stale partitions +epprd_rg:cl_sync_vgs(0.247):datavg[check_sync:213] is_start_logged=0 +epprd_rg:cl_sync_vgs(0.247):datavg[check_sync:218] at_least_one_sync_success=0 +epprd_rg:cl_sync_vgs(0.248):datavg[check_sync:219] lqueryvg -g 00c44af100004b00000001851e9dc053 -L +epprd_rg:cl_sync_vgs(0.252):datavg[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.317):datavg[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.317):datavg.epprdaloglv[check_sync:221] PS4_LOOP=datavg.epprdaloglv +epprd_rg:cl_sync_vgs(0.317):datavg.epprdaloglv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.317):datavg.epprdaloglv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.317):datavg.epprdaloglv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.317):datavg.epprdaloglv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.318):datavg.epprdaloglv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.318):datavg.saplv[check_sync:221] PS4_LOOP=datavg.saplv +epprd_rg:cl_sync_vgs(0.318):datavg.saplv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.318):datavg.saplv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.318):datavg.saplv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.318):datavg.saplv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.318):datavg.saplv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.318):datavg.sapmntlv[check_sync:221] PS4_LOOP=datavg.sapmntlv +epprd_rg:cl_sync_vgs(0.318):datavg.sapmntlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.318):datavg.sapmntlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.318):datavg.sapmntlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.318):datavg.sapmntlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.318):datavg.sapmntlv[check_sync:221] [[ 
high == high ]] +epprd_rg:cl_sync_vgs(0.318):datavg.oraclelv[check_sync:221] PS4_LOOP=datavg.oraclelv +epprd_rg:cl_sync_vgs(0.318):datavg.oraclelv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.318):datavg.oraclelv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.318):datavg.oraclelv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.318):datavg.oraclelv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.318):datavg.oraclelv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.318):datavg.epplv[check_sync:221] PS4_LOOP=datavg.epplv +epprd_rg:cl_sync_vgs(0.318):datavg.epplv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.318):datavg.epplv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.318):datavg.epplv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.318):datavg.epplv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.318):datavg.epplv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.318):datavg.oraarchlv[check_sync:221] PS4_LOOP=datavg.oraarchlv +epprd_rg:cl_sync_vgs(0.318):datavg.oraarchlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.318):datavg.oraarchlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.318):datavg.oraarchlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.318):datavg.oraarchlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.318):datavg.oraarchlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata1lv[check_sync:221] PS4_LOOP=datavg.sapdata1lv +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata1lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata1lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata1lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata1lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata1lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata2lv[check_sync:221] PS4_LOOP=datavg.sapdata2lv +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata2lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata2lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata2lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata2lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata2lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata3lv[check_sync:221] PS4_LOOP=datavg.sapdata3lv +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata3lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata3lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata3lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata3lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata3lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata4lv[check_sync:221] PS4_LOOP=datavg.sapdata4lv +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata4lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata4lv[check_sync:225] : Anything else indicates no 
stale partitions +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata4lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata4lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.318):datavg.sapdata4lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.318):datavg.boardlv[check_sync:221] PS4_LOOP=datavg.boardlv +epprd_rg:cl_sync_vgs(0.318):datavg.boardlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.318):datavg.boardlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.318):datavg.boardlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.318):datavg.boardlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.318):datavg.boardlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.318):datavg.origlogAlv[check_sync:221] PS4_LOOP=datavg.origlogAlv +epprd_rg:cl_sync_vgs(0.318):datavg.origlogAlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.318):datavg.origlogAlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.318):datavg.origlogAlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.318):datavg.origlogAlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.318):datavg.origlogAlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.318):datavg.origlogBlv[check_sync:221] PS4_LOOP=datavg.origlogBlv +epprd_rg:cl_sync_vgs(0.318):datavg.origlogBlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.318):datavg.origlogBlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.318):datavg.origlogBlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.318):datavg.origlogBlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.318):datavg.origlogBlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.318):datavg.mirrlogAlv[check_sync:221] PS4_LOOP=datavg.mirrlogAlv +epprd_rg:cl_sync_vgs(0.318):datavg.mirrlogAlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.318):datavg.mirrlogAlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.318):datavg.mirrlogAlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.318):datavg.mirrlogAlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.318):datavg.mirrlogAlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.318):datavg.mirrlogBlv[check_sync:221] PS4_LOOP=datavg.mirrlogBlv +epprd_rg:cl_sync_vgs(0.318):datavg.mirrlogBlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.318):datavg.mirrlogBlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.318):datavg.mirrlogBlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.318):datavg.mirrlogBlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.319):datavg.mirrlogBlv[check_sync:268] [[ -n RG_MOVE_COMPLETE ]] +epprd_rg:cl_sync_vgs(0.319):datavg.mirrlogBlv[check_sync:268] (( 0 == 1 )) +epprd_rg:cl_sync_vgs[355] exit 0 +epprd_rg:clmanageroha[roha_session_open:187] echo epprd_app +epprd_rg:clmanageroha[roha_session_open:187] sort -u +epprd_rg:clmanageroha[roha_session_open:187] echo '\nepprd_app' +epprd_rg:clmanageroha[roha_session_open:187] [[ -z '' ]] +epprd_rg:clmanageroha[roha_session_open:189] roha_session_log 'INFO: No ROHA configured on applications.\n' [ROHALOG:18743582:(0.557)] INFO: No ROHA configured on applications. 
[ROHALOG:18743582:(0.557)] +epprd_rg:clmanageroha[roha_session_open:190] no_roha_apps=1 +epprd_rg:clmanageroha[roha_session_open:195] read_tunables +epprd_rg:clmanageroha[roha_session_open:196] echo '' +epprd_rg:clmanageroha[roha_session_open:196] grep -q epprda +epprd_rg:clmanageroha[roha_session_open:197] (( 1 == 0 )) +epprd_rg:clmanageroha[roha_session_open:202] (( 1 == 1 )) +epprd_rg:clmanageroha[roha_session_open:203] roha_session_read_odm_dynresop DLPAR_MEM +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_MEM -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:203] (( 0 == 0.00 )) +epprd_rg:clmanageroha[roha_session_open:204] roha_session_read_odm_dynresop DLPAR_PROCS +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROCS -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:204] (( 0 == 0 )) +epprd_rg:clmanageroha[roha_session_open:205] roha_session_read_odm_dynresop DLPAR_PROC_UNITS +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROC_UNITS -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:205] (( 0 == 0.00 )) +epprd_rg:clmanageroha[roha_session_open:206] roha_session_log 'INFO: Nothing to be done.\n' [ROHALOG:18743582:(0.615)] INFO: Nothing to be done. 
[ROHALOG:18743582:(0.615)] +epprd_rg:clmanageroha[roha_session_open:207] exit 0 +epprd_rg:process_resources[process_applications:335] RC=0 +epprd_rg:process_resources[process_applications:336] (( 0 != 0 )) +epprd_rg:process_resources[process_applications:343] (( LPAR_ACQUIRE_FAILED == 0 )) +epprd_rg:process_resources[process_applications:345] : Loop through all groups to start or stop applications +epprd_rg:process_resources[process_applications:348] export GROUPNAME +epprd_rg:process_resources[process_applications:351] : Break out application data +epprd_rg:process_resources[process_applications:353] get_list_head epprd_app +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo epprd_app +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo epprd_app +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[process_applications:353] read LIST_OF_APPLICATIONS_FOR_RG +epprd_rg:process_resources[process_applications:354] get_list_tail epprd_app +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo epprd_app +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[process_applications:354] read ALL_APPLICATIONS +epprd_rg:process_resources[process_applications:356] get_list_head +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[process_applications:356] read MISCDATA_FOR_RG +epprd_rg:process_resources[process_applications:357] get_list_tail +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[process_applications:357] read MISCDATA +epprd_rg:process_resources[process_applications:359] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[process_applications:374] APPLICATIONS=epprd_app +epprd_rg:process_resources[process_applications:374] export APPLICATIONS +epprd_rg:process_resources[process_applications:375] MISC_DATA='' +epprd_rg:process_resources[process_applications:375] export MISC_DATA 
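[Annotation] The get_list_head/get_list_tail calls traced above unpack PowerHA's packed resource lists: entries for successive resource groups are separated by ':', and items within an entry by ','. A minimal ksh93 sketch of the idiom (reconstructed from the trace, not taken from PowerHA source; it relies on ksh93 running the last stage of a pipeline in the current shell, so the read's variables persist):

    function get_list_head {
        echo "$*" | IFS=: read listhead listtail    # keep the first ':'-separated field
        echo "$listhead" | tr ',' ' '               # expand ','-separated items
    }
    function get_list_tail {
        echo "$*" | IFS=: read listhead listtail    # discard the first field
        echo "$listtail"
    }

With APPLICATIONS packed as just 'epprd_app' there is a single group, so the head is 'epprd_app' and the tail is empty, which is why the later calls above echo nothing.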
+epprd_rg:process_resources[process_applications:378] : Now call start_or_stop_applications_for_rg to do the app start/stop. +epprd_rg:process_resources[process_applications:381] start_or_stop_applications_for_rg ACQUIRE /var/hacmp/log/.process_resources_applications.26214862.epprd_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:248] PS4_FUNC=start_or_stop_applications_for_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:248] typeset PS4_FUNC +epprd_rg:process_resources[start_or_stop_applications_for_rg:249] [[ high == high ]] +epprd_rg:process_resources[start_or_stop_applications_for_rg:249] set -x +epprd_rg:process_resources[start_or_stop_applications_for_rg:251] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[start_or_stop_applications_for_rg:253] cmd_to_execute=start_server +epprd_rg:process_resources[start_or_stop_applications_for_rg:259] : File name to store our exit status +epprd_rg:process_resources[start_or_stop_applications_for_rg:261] STATUS_FILE=/var/hacmp/log/.process_resources_applications.26214862.epprd_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:264] : Use clcallev to run the event +epprd_rg:process_resources[start_or_stop_applications_for_rg:266] clcallev start_server epprd_app +epprd_rg:process_resources[process_applications:384] : Add PID of the last bg start_or_stop_applications_for_rg process to WAITPIDS. +epprd_rg:process_resources[process_applications:386] WAITPIDS=' 25821442' +epprd_rg:process_resources[process_applications:390] : Wait for the start_or_stop_applications_for_rg PIDs to finish. +epprd_rg:process_resources[process_applications:393] wait 25821442 Jan 28 2023 18:04:24 EVENT START: start_server epprd_app |2023-01-28T18:04:24|8561|EVENT START: start_server epprd_app| +epprd_rg:start_server[+206] version=%I% +epprd_rg:start_server[+210] export TMP_FILE=/var/hacmp/log/.start_server.18743590 +epprd_rg:start_server[+211] export DCD=/etc/es/objrepos +epprd_rg:start_server[+212] export ACD=/usr/es/sbin/cluster/etc/objrepos/active +epprd_rg:start_server[+214] rm -f /var/hacmp/log/.start_server.18743590 +epprd_rg:start_server[+216] STATUS=0 +epprd_rg:start_server[+220] PROC_RES=false +epprd_rg:start_server[+224] [[ APPLICATIONS != 0 ]] +epprd_rg:start_server[+224] [[ APPLICATIONS != GROUP ]] +epprd_rg:start_server[+225] PROC_RES=true +epprd_rg:start_server[+228] set -u +epprd_rg:start_server[+229] typeset WPARNAME EXEC WPARDIR +epprd_rg:start_server[+230] export WPARNAME EXEC WPARDIR +epprd_rg:start_server[+232] EXEC= +epprd_rg:start_server[+233] WPARNAME= +epprd_rg:start_server[+234] WPARDIR= +epprd_rg:start_server[+237] ALLSERVERS=All_servers +epprd_rg:start_server[+238] ALLNOERRSERV=All_nonerror_servers +epprd_rg:start_server[+239] cl_RMupdate resource_acquiring All_servers start_server 2023-01-28T18:04:24.697649 2023-01-28T18:04:24.701989 +epprd_rg:start_server[+241] +epprd_rg:start_server[+241] clwparname epprd_rg +epprd_rg:clwparname[38] version=1.3.1.1 +epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clwparname[44] [[ -z '' ]] +epprd_rg:clwparname[44] exit 0 WPARNAME= +epprd_rg:start_server[+243] (( 0 == 0 )) +epprd_rg:start_server[+243] [[ -n ]] +epprd_rg:start_server[+258] start_and_monitor_server epprd_app +epprd_rg:start_server[start_and_monitor_server+5] RETURN_STATUS=0 +epprd_rg:start_server[start_and_monitor_server+7] server=epprd_app +epprd_rg:start_server[start_and_monitor_server+12] echo Checking whether epprd_app is already running...\n Checking 
whether epprd_app is already running... +epprd_rg:start_server[start_and_monitor_server+12] [[ -n ]] +epprd_rg:start_server[start_and_monitor_server+18] cl_app_startup_monitor -s epprd_app -a +epprd_rg:start_server[+261] wait +epprd_rg:start_server[start_and_monitor_server+21] RETURN_STATUS=1 +epprd_rg:start_server[start_and_monitor_server+22] : exit status of cl_app_startup_monitor is: 1 +epprd_rg:start_server[start_and_monitor_server+22] [[ 1 == 0 ]] +epprd_rg:start_server[start_and_monitor_server+33] echo Application monitor(s) indicate that epprd_app is not active. Continuing with application startup.\n Application monitor(s) indicate that epprd_app is not active. Continuing with application startup. +epprd_rg:start_server[start_and_monitor_server+42] +epprd_rg:start_server[start_and_monitor_server+42] cllsserv -cn epprd_app +epprd_rg:start_server[start_and_monitor_server+42] cut -d: -f2 START=/etc/hacmp/epprd_start.sh +epprd_rg:start_server[start_and_monitor_server+43] +epprd_rg:start_server[start_and_monitor_server+43] echo /etc/hacmp/epprd_start.sh +epprd_rg:start_server[start_and_monitor_server+43] cut -d -f1 START_SCRIPT=/etc/hacmp/epprd_start.sh +epprd_rg:start_server[start_and_monitor_server+44] +epprd_rg:start_server[start_and_monitor_server+44] cllsserv -cn epprd_app +epprd_rg:start_server[start_and_monitor_server+44] cut -d: -f4 START_MODE=background +epprd_rg:start_server[start_and_monitor_server+44] [[ -z background ]] +epprd_rg:start_server[start_and_monitor_server+47] PATTERN=epprda epprd_app +epprd_rg:start_server[start_and_monitor_server+48] RETURN_STATUS=0 +epprd_rg:start_server[start_and_monitor_server+51] amlog_trace Starting application controller in background|epprd_app +epprd_rg:start_server[start_and_monitor_server+200] clcycle clavailability.log +epprd_rg:start_server[start_and_monitor_server+200] 1> /dev/null 2>& 1 +epprd_rg:start_server[start_and_monitor_server+200] +epprd_rg:start_server[start_and_monitor_server+200] cltime DATE=2023-01-28T18:04:24.752913 +epprd_rg:start_server[start_and_monitor_server+200] echo |2023-01-28T18:04:24.752913|INFO: Starting application controller in background|epprd_app +epprd_rg:start_server[start_and_monitor_server+200] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:start_server[start_and_monitor_server+51] [[ -n ]] +epprd_rg:start_server[start_and_monitor_server+51] [[ -z ]] +epprd_rg:start_server[start_and_monitor_server+51] [[ -x /etc/hacmp/epprd_start.sh ]] +epprd_rg:start_server[start_and_monitor_server+60] [ background == background ] +epprd_rg:start_server[start_and_monitor_server+62] date +epprd_rg:start_server[start_and_monitor_server+62] LC_ALL=C +epprd_rg:start_server[start_and_monitor_server+62] echo Running application controller start script for epprd_app in the background at Sat Jan 28 18:04:24 KORST 2023.\n Running application controller start script for epprd_app in the background at Sat Jan 28 18:04:24 KORST 2023. 
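[Annotation] START_MODE=background, read from the application controller definition with cllsserv -cn, means the start script is launched asynchronously and success is judged by the configured startup monitor rather than by the script's exit status. A rough reconstruction of the control flow traced above and continued below (a sketch; variable names follow the trace, not PowerHA source):

    if [[ $START_MODE == background ]]; then
        ODMDIR=/etc/es/objrepos $START_SCRIPT &     # launch the controller, do not wait
    fi
    cl_app_startup_monitor -s epprd_app             # the monitor decides success
    RETURN_STATUS=$?                                # 0 below, so startup is recorded
    echo "epprd_app $RETURN_STATUS" > $TMP_FILE.epprd_app   # per-server status file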
+epprd_rg:start_server[start_and_monitor_server+63] /etc/hacmp/epprd_start.sh +epprd_rg:start_server[start_and_monitor_server+63] ODMDIR=/etc/es/objrepos +epprd_rg:start_server[start_and_monitor_server+62] [[ 0 != 0 ]] +epprd_rg:start_server[start_and_monitor_server+62] [[ -n ]] +epprd_rg:start_server[start_and_monitor_server+94] cl_app_startup_monitor -s epprd_app +epprd_rg:start_server[start_and_monitor_server+97] RETURN_STATUS=0 +epprd_rg:start_server[start_and_monitor_server+98] : exit status of cl_app_startup_monitor is: 0 +epprd_rg:start_server[start_and_monitor_server+98] [[ 0 != 0 ]] +epprd_rg:start_server[start_and_monitor_server+109] echo epprd_app 0 +epprd_rg:start_server[start_and_monitor_server+109] 1> /var/hacmp/log/.start_server.18743590.epprd_app +epprd_rg:start_server[start_and_monitor_server+112] +epprd_rg:start_server[start_and_monitor_server+112] cllsserv -cn epprd_app +epprd_rg:start_server[start_and_monitor_server+112] cut -d: -f4 START_MODE=background +epprd_rg:start_server[start_and_monitor_server+112] [[ background == foreground ]] +epprd_rg:start_server[start_and_monitor_server+132] return 0 +epprd_rg:start_server[+266] +epprd_rg:start_server[+266] cllsserv -cn epprd_app +epprd_rg:start_server[+266] cut -d: -f4 START_MODE=background +epprd_rg:start_server[+267] [ background == background ] +epprd_rg:start_server[+269] +epprd_rg:start_server[+269] cat /var/hacmp/log/.start_server.18743590.epprd_app +epprd_rg:start_server[+269] cut -f2 -d SUCCESS=0 +epprd_rg:start_server[+269] [[ 0 != 0 ]] +epprd_rg:start_server[+274] amlog_trace Starting application controller in background|epprd_app +epprd_rg:start_server[+200] clcycle clavailability.log +epprd_rg:start_server[+200] 1> /dev/null 2>& 1 +epprd_rg:start_server[+200] +epprd_rg:start_server[+200] cltime DATE=2023-01-28T18:04:24.797786 +epprd_rg:start_server[+200] echo |2023-01-28T18:04:24.797786|INFO: Starting application controller in background|epprd_app +epprd_rg:start_server[+200] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:start_server[+276] +epprd_rg:start_server[+276] clodmget -q name = epprd_app -n -f cpu_usage_monitor HACMPserver MACTIVE=no +epprd_rg:start_server[+276] [[ no == yes ]] +epprd_rg:start_server[+292] +epprd_rg:start_server[+292] cat /var/hacmp/log/.start_server.18743590.epprd_app +epprd_rg:start_server[+292] cut -f2 -d SUCCESS=0 +epprd_rg:start_server[+292] [[ 0 != +([0-9]) ]] +epprd_rg:start_server[+297] (( 0 != 0 )) +epprd_rg:start_server[+303] [[ 0 == 0 ]] +epprd_rg:start_server[+306] rm -f /var/hacmp/log/.start_server.18743590.epprd_app +epprd_rg:start_server[+308] cl_RMupdate resource_up All_nonerror_servers start_server 2023-01-28T18:04:24.827898 2023-01-28T18:04:24.832139 +epprd_rg:start_server[+314] exit 0 Jan 28 2023 18:04:24 EVENT COMPLETED: start_server epprd_app 0 |2023-01-28T18:04:24|8561|EVENT COMPLETED: start_server epprd_app 0| +epprd_rg:process_resources[start_or_stop_applications_for_rg:267] RC=0 +epprd_rg:process_resources[start_or_stop_applications_for_rg:269] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[start_or_stop_applications_for_rg:279] (( 0 != 0 )) +epprd_rg:process_resources[start_or_stop_applications_for_rg:291] : Store the result for later accumulation +epprd_rg:process_resources[start_or_stop_applications_for_rg:293] print 'epprd_rg 0' +epprd_rg:process_resources[start_or_stop_applications_for_rg:293] 1>> /var/hacmp/log/.process_resources_applications.26214862.epprd_rg +epprd_rg:process_resources[process_applications:396] : Look at all the status 
files to see if any were unsuccessful +epprd_rg:process_resources[process_applications:399] cat /var/hacmp/log/.process_resources_applications.26214862.epprd_rg +epprd_rg:process_resources[process_applications:399] read skip SUCCESS rest +epprd_rg:process_resources[process_applications:401] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:411] rm -f /var/hacmp/log/.process_resources_applications.26214862.epprd_rg +epprd_rg:process_resources[process_applications:416] : Release lpar resources in one-shot now that applications are stopped +epprd_rg:process_resources[process_applications:418] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[process_applications:433] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:434] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:435] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:439] return 0 +epprd_rg:process_resources[3550] RC=0 +epprd_rg:process_resources[3551] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:04:24.929861 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=ONLINE RESOURCE_GROUPS='"epprd_rg"' +epprd_rg:process_resources[1] JOB_TYPE=ONLINE +epprd_rg:process_resources[1] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ ONLINE == RELEASE ]] +epprd_rg:process_resources[3360] [[ ONLINE == ONLINE ]] +epprd_rg:process_resources[3363] INFO_STRING='' +epprd_rg:process_resources[3364] clnodename +epprd_rg:process_resources[3373] ENV_VAR=GROUP_epprd_rg_epprda +epprd_rg:process_resources[3374] eval 'echo $GROUP_epprd_rg_epprda' +epprd_rg:process_resources[1] echo WILLBEUPPOSTEVENT +epprd_rg:process_resources[3374] read ENV_VAR +epprd_rg:process_resources[3375] [[ WILLBEUPPOSTEVENT == WILLBEUPPOSTEVENT ]] +epprd_rg:process_resources[3376] INFO_STRING='|DESTINATION=epprda' +epprd_rg:process_resources[3377] IS_SERVICE_STOP=0 +epprd_rg:process_resources[3379] [[ WILLBEUPPOSTEVENT == ISUPPREEVENT ]] +epprd_rg:process_resources[3373] ENV_VAR=GROUP_epprd_rg_epprds +epprd_rg:process_resources[3374] eval 'echo $GROUP_epprd_rg_epprds' +epprd_rg:process_resources[1] echo +epprd_rg:process_resources[3374] read ENV_VAR +epprd_rg:process_resources[3375] [[ '' == WILLBEUPPOSTEVENT ]] +epprd_rg:process_resources[3379] [[ '' == ISUPPREEVENT ]] +epprd_rg:process_resources[3384] (( 1 == 0 && 0 ==0 )) +epprd_rg:process_resources[3673] set_resource_group_state UP +epprd_rg:process_resources[set_resource_group_state:82] PS4_FUNC=set_resource_group_state +epprd_rg:process_resources[set_resource_group_state:82] typeset PS4_FUNC +epprd_rg:process_resources[set_resource_group_state:83] [[ high == high ]] +epprd_rg:process_resources[set_resource_group_state:83] set -x 
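[Annotation] process_resources is driven by clRGPA, the resource group policy analyzer: each pass of the outer loop asks clRGPA what to do next, evals the variable assignments it prints, and dispatches on JOB_TYPE until NONE arrives. The iteration just traced returned JOB_TYPE=ONLINE for epprd_rg and dispatched set_resource_group_state UP (continued below); the next iteration returns NONE and breaks. Reduced sketch of that loop (reconstruction from the trace):

    while true; do
        set -a                      # auto-export everything clRGPA defines
        eval $(clRGPA)              # e.g. JOB_TYPE=ONLINE RESOURCE_GROUPS="epprd_rg"
        set +a
        case $JOB_TYPE in
            ONLINE)  set_resource_group_state UP ;;
            RELEASE) : release-side processing ;;
            NONE)    break ;;
        esac
    done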
+epprd_rg:process_resources[set_resource_group_state:84] STAT=0 +epprd_rg:process_resources[set_resource_group_state:85] new_status=UP +epprd_rg:process_resources[set_resource_group_state:89] export GROUPNAME +epprd_rg:process_resources[set_resource_group_state:90] [[ UP != DOWN ]] +epprd_rg:process_resources[set_resource_group_state:92] clchdaemons -d clstrmgr_scripts -t resource_locator -n epprda -o epprd_rg -v UP +epprd_rg:process_resources[set_resource_group_state:100] : Resource Manager Updates +epprd_rg:process_resources[set_resource_group_state:116] cl_RMupdate rg_up epprd_rg process_resources 2023-01-28T18:04:24.968432 2023-01-28T18:04:24.972692 +epprd_rg:process_resources[set_resource_group_state:118] amlog_trace '' 'acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:318] clcycle clavailability.log +epprd_rg:process_resources[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:process_resources[amlog_trace:319] cltime +epprd_rg:process_resources[amlog_trace:319] DATE=2023-01-28T18:04:25.003279 +epprd_rg:process_resources[amlog_trace:320] echo '|2023-01-28T18:04:25.003279|INFO: acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:process_resources[set_resource_group_state:153] return 0 +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:04:25.015270 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=NONE +epprd_rg:process_resources[1] JOB_TYPE=NONE +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ NONE == RELEASE ]] +epprd_rg:process_resources[3360] [[ NONE == ONLINE ]] +epprd_rg:process_resources[3729] break +epprd_rg:process_resources[3740] : If sddsrv was turned off above, turn it back on again +epprd_rg:process_resources[3742] [[ FALSE == TRUE ]] +epprd_rg:process_resources[3747] exit 0 +epprd_rg:rg_move_complete[455] STATUS=0 +epprd_rg:rg_move_complete[456] : The exit status of process_resources is: 0 +epprd_rg:rg_move_complete[461] unset AM_SYNC_CALLED_BY +epprd_rg:rg_move_complete[462] [[ TRUE == TRUE ]] +epprd_rg:rg_move_complete[491] [[ -z '' ]] +epprd_rg:rg_move_complete[493] RESOURCE_GROUPS=epprd_rg +epprd_rg:rg_move_complete[499] GROUPNAME=epprd_rg +epprd_rg:rg_move_complete[499] export GROUPNAME +epprd_rg:rg_move_complete[501] cl_rrmethods2call postrg_move +epprd_rg:cl_rrmethods2call[56] version=%I% +epprd_rg:cl_rrmethods2call[84] RRMETHODS='' +epprd_rg:cl_rrmethods2call[85] NEED_RR_ENV_VARS=no +epprd_rg:cl_rrmethods2call[124] NEED_RR_ENV_VARS=yes +epprd_rg:cl_rrmethods2call[129] : Set the '*_REP_RESOURCE' variables if needed. 
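[Annotation] set_resource_group_state, traced above, records the new state in three places: clchdaemons updates the resource locator for the node, cl_RMupdate notifies the cluster Resource Manager, and amlog_trace stamps the availability log. The amlog_trace body is fully visible in the trace; isolated, it is:

    function amlog_trace {
        clcycle clavailability.log > /dev/null 2>&1     # rotate the log if needed
        DATE=$(cltime)                                  # e.g. 2023-01-28T18:04:25.003279
        echo "|$DATE|INFO: $2" >> /var/hacmp/availability/clavailability.log
    }

Each availability record is therefore one pipe-delimited line, which is what makes clavailability.log easy to grep by resource group or node.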
+epprd_rg:cl_rrmethods2call[131] [[ yes == yes ]] +epprd_rg:cl_rrmethods2call[133] cllsres +epprd_rg:cl_rrmethods2call[133] 2> /dev/null +epprd_rg:cl_rrmethods2call[133] eval APPLICATIONS='"epprd_app"' EXPORT_FILESYSTEM='"/board_org' '/sapmnt/EPP"' FILESYSTEM='""' FORCED_VARYON='"false"' FSCHECK_TOOL='"fsck"' FS_BEFORE_IPADDR='"false"' MOUNT_FILESYSTEM='"/board;/board_org"' RECOVERY_METHOD='"sequential"' SERVICE_LABEL='"epprd"' SSA_DISK_FENCING='"false"' VG_AUTO_IMPORT='"false"' VOLUME_GROUP='"datavg"' USERDEFINED_RESOURCES='""' +epprd_rg:cl_rrmethods2call[1] APPLICATIONS=epprd_app +epprd_rg:cl_rrmethods2call[1] EXPORT_FILESYSTEM='/board_org /sapmnt/EPP' +epprd_rg:cl_rrmethods2call[1] FILESYSTEM='' +epprd_rg:cl_rrmethods2call[1] FORCED_VARYON=false +epprd_rg:cl_rrmethods2call[1] FSCHECK_TOOL=fsck +epprd_rg:cl_rrmethods2call[1] FS_BEFORE_IPADDR=false +epprd_rg:cl_rrmethods2call[1] MOUNT_FILESYSTEM='/board;/board_org' +epprd_rg:cl_rrmethods2call[1] RECOVERY_METHOD=sequential +epprd_rg:cl_rrmethods2call[1] SERVICE_LABEL=epprd +epprd_rg:cl_rrmethods2call[1] SSA_DISK_FENCING=false +epprd_rg:cl_rrmethods2call[1] VG_AUTO_IMPORT=false +epprd_rg:cl_rrmethods2call[1] VOLUME_GROUP=datavg +epprd_rg:cl_rrmethods2call[1] USERDEFINED_RESOURCES='' +epprd_rg:cl_rrmethods2call[137] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[142] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[147] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[152] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[157] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[162] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[167] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[172] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[182] [[ -z '' ]] +epprd_rg:cl_rrmethods2call[184] typeset sysmgdata +epprd_rg:cl_rrmethods2call[185] typeset reposmgdata +epprd_rg:cl_rrmethods2call[186] [[ -x /usr/es/sbin/cluster/xd_generic/xd_cli/clxd_list_mg_smit ]] +epprd_rg:cl_rrmethods2call[191] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[191] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[197] echo '' +epprd_rg:cl_rrmethods2call[199] return 0 +epprd_rg:rg_move_complete[501] METHODS='' +epprd_rg:rg_move_complete[516] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. 
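[Annotation] cl_rrmethods2call, which completed above, decides whether any replicated-resource recovery methods must run at this point in the event. It evals the group's resource definitions from cllsres into shell variables and then tests one variable per replication technology (the eight empty [[ -n '' ]] tests; the variable names are not visible in this trace). With no replicated resources configured it prints an empty method list, so rg_move_complete has no METHODS to run. The eval idiom, isolated (sketch):

    eval $(cllsres 2>/dev/null)    # sets APPLICATIONS="epprd_app", VOLUME_GROUP="datavg", etc.
    RRMETHODS=''                   # stays empty: no replicated resources are defined
    echo "$RRMETHODS"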
+epprd_rg:rg_move_complete[518] exit 0 Jan 28 2023 18:04:25 EVENT COMPLETED: rg_move_complete epprda 1 0 |2023-01-28T18:04:25|8561|EVENT COMPLETED: rg_move_complete epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:04:25.128226 + echo '|2023-01-28T18:04:25.128226|INFO: rg_move_complete|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log
PowerHA SystemMirror Event Summary
----------------------------------------------------------------------------
Serial number for this event: 8561
Event: TE_RG_MOVE_ACQUIRE
Start time: Sat Jan 28 18:03:38 2023
End time: Sat Jan 28 18:04:25 2023
Action: Resource: Script Name:
----------------------------------------------------------------------------
Acquiring resource group: epprd_rg process_resources
Search on: Sat.Jan.28.18:03:39.KORST.2023.process_resources.epprd_rg.ref
Acquiring resource: All_service_addrs acquire_service_addr
Search on: Sat.Jan.28.18:03:39.KORST.2023.acquire_service_addr.All_service_addrs.epprd_rg.ref
Resource online: All_nonerror_service_addrs acquire_service_addr
Search on: Sat.Jan.28.18:03:39.KORST.2023.acquire_service_addr.All_nonerror_service_addrs.epprd_rg.ref
Acquiring resource: All_volume_groups cl_activate_vgs
Search on: Sat.Jan.28.18:03:39.KORST.2023.cl_activate_vgs.All_volume_groups.epprd_rg.ref
Resource online: All_nonerror_volume_groups cl_activate_vgs
Search on: Sat.Jan.28.18:03:43.KORST.2023.cl_activate_vgs.All_nonerror_volume_groups.epprd_rg.ref
Acquiring resource: All_filesystems cl_activate_fs
Search on: Sat.Jan.28.18:03:45.KORST.2023.cl_activate_fs.All_filesystems.epprd_rg.ref
Resource online: All_non_error_filesystems cl_activate_fs
Search on: Sat.Jan.28.18:03:47.KORST.2023.cl_activate_fs.All_non_error_filesystems.epprd_rg.ref
Acquiring resource: All_exports cl_export_fs
Search on: Sat.Jan.28.18:03:52.KORST.2023.cl_export_fs.All_exports.epprd_rg.ref
Resource online: All_nonerror_exports cl_export_fs
Search on: Sat.Jan.28.18:03:52.KORST.2023.cl_export_fs.All_nonerror_exports.epprd_rg.ref
Acquiring resource: All_nfs_mounts cl_activate_nfs
Search on: Sat.Jan.28.18:03:52.KORST.2023.cl_activate_nfs.All_nfs_mounts.epprd_rg.ref
Acquiring resource: All_servers start_server
Search on: Sat.Jan.28.18:04:24.KORST.2023.start_server.All_servers.epprd_rg.ref
Resource online: All_nonerror_servers start_server
Search on: Sat.Jan.28.18:04:24.KORST.2023.start_server.All_nonerror_servers.epprd_rg.ref
Resource group online: epprd_rg process_resources
Search on: Sat.Jan.28.18:04:24.KORST.2023.process_resources.epprd_rg.ref
----------------------------------------------------------------------------
|EVENT_SUMMARY_START|TE_RG_MOVE_ACQUIRE|2023-01-28T18:03:38|2023-01-28T18:04:25|8561|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:03:39.KORST.2023.process_resources.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:03:39.KORST.2023.acquire_service_addr.All_service_addrs.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:03:39.KORST.2023.acquire_service_addr.All_nonerror_service_addrs.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:03:39.KORST.2023.cl_activate_vgs.All_volume_groups.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:03:43.KORST.2023.cl_activate_vgs.All_nonerror_volume_groups.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:03:45.KORST.2023.cl_activate_fs.All_filesystems.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:03:47.KORST.2023.cl_activate_fs.All_non_error_filesystems.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:03:52.KORST.2023.cl_export_fs.All_exports.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:03:52.KORST.2023.cl_export_fs.All_nonerror_exports.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:03:52.KORST.2023.cl_activate_nfs.All_nfs_mounts.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:04:24.KORST.2023.start_server.All_servers.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:04:24.KORST.2023.start_server.All_nonerror_servers.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:04:24.KORST.2023.process_resources.epprd_rg.ref.ref|
|EVENT_SUMMARY_END|
PowerHA SystemMirror Event Preamble
----------------------------------------------------------------------------
Serial number for this event: 8562
No resource state change initiated by the cluster manager as a result of this event
----------------------------------------------------------------------------
|EVENT_PREAMBLE_START|TE_JOIN_NODE_DEP_COMPLETE|2023-01-28T18:04:27|8562|
|EVENT_NO_ACTIONS_QUEUED|
|EVENT_PREAMBLE_END|
Jan 28 2023 18:04:27 EVENT START: node_up_complete epprda |2023-01-28T18:04:27|8562|EVENT START: node_up_complete epprda| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:04:27.329931 + echo '|2023-01-28T18:04:27.329931|INFO: node_up_complete|epprda' + 1>> /var/hacmp/availability/clavailability.log + version=%I% + set -a + cllsparam -n epprda + eval NODE_NAME=epprda VERBOSE_LOGGING=high PS4=$'\'${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO]' $'\'' DEBUG_LEVEL=Standard LC_ALL=$'\'C\'' + NODE_NAME=epprda + VERBOSE_LOGGING=high :node_up_complete[1] PS4='${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] ' :node_up_complete[1] DEBUG_LEVEL=Standard :node_up_complete[1] LC_ALL=C :node_up_complete[80] set +a :node_up_complete[82] NODENAME=epprda :node_up_complete[83] RC=0 :node_up_complete[83] typeset -i RC :node_up_complete[84] UPDATESTATD=0 :node_up_complete[84] typeset -i UPDATESTATD :node_up_complete[86] LPM_IN_PROGRESS_DIR=/var/hacmp/.lpm_in_progress :node_up_complete[86] typeset LPM_IN_PROGRESS_DIR :node_up_complete[87] LPM_IN_PROGRESS_PREFIX=lpm :node_up_complete[87] typeset LPM_IN_PROGRESS_PREFIX :node_up_complete[88] STATE_FILE=/var/hacmp/cl_dr.state :node_up_complete[88] typeset STATE_FILE :node_up_complete[97] STATUS=0 :node_up_complete[99] set -u :node_up_complete[101] (( 1 < 1 )) :node_up_complete[107] START_MODE='' :node_up_complete[107] typeset START_MODE :node_up_complete[108] (( 1 > 1 )) :node_up_complete[114] : serial number for this event is 8562 :node_up_complete[118] RPCLOCKDSTOPPED=0 :node_up_complete[118] typeset -i RPCLOCKDSTOPPED :node_up_complete[119] [[ -f /tmp/.RPCLOCKDSTOPPED ]] :node_up_complete[127] clnodename :node_up_complete[127] wc -l :node_up_complete[127] (( 2 == 2 )) :node_up_complete[129] clodmget -f group -n HACMPgroup :node_up_complete[129] RESOURCE_GROUPS=epprd_rg :node_up_complete[132] clodmget -q group='epprd_rg AND name=EXPORT_FILESYSTEM' -f value -n HACMPresource :node_up_complete[132] EXPORTLIST=$'/board_org\n/sapmnt/EPP' :node_up_complete[133] [[ -n $'/board_org\n/sapmnt/EPP' ]] :node_up_complete[135] UPDATESTATD=1 :node_up_complete[136] [[ epprda == epprda ]] :node_up_complete[139] lssrc -s rpc.statd :node_up_complete[139] LC_ALL=C :node_up_complete[139] grep
inoperative :node_up_complete[140] (( 1 == 0 )) :node_up_complete[146] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+196] rpcinfo -p :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw status :cl_update_statd(0)[+207] : Get the current twin, if there is one :cl_update_statd(0)[+209] :cl_update_statd(0)[+209] nfso -H sm_gethost :cl_update_statd(0)[+209] 2>& 1 CURTWIN= :cl_update_statd(0)[+210] RC=0 :cl_update_statd(0)[+212] [[ -z true ]] :cl_update_statd(0)[+212] [[ -z ]] :cl_update_statd(0)[+215] : Local node is no longer a cluster member, unregister its twin :cl_update_statd(0)[+215] [[ -n ]] :cl_update_statd(0)[+259] : RC is actually 0 :cl_update_statd(0)[+266] return 0 :node_up_complete[147] (( 0 )) :node_up_complete[151] break :node_up_complete[156] (( 1 )) :node_up_complete[158] (( 0 )) :node_up_complete[198] [[ TRUE == FALSE ]] :node_up_complete[268] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. :node_up_complete[270] : This is the final clRGinfo output :node_up_complete[272] clRGinfo -p -t :node_up_complete[272] 2>& 1 clRGinfo[431]: version I clRGinfo[517]: Number of resource groups = 0 clRGinfo[562]: cluster epprda_cluster is version = 22 clRGinfo[597]: no resource groups specified on command line - print all clRGinfo[685]: Current group is 'epprd_rg' get primary state info for state 6 get secondary state info for state 6 getPrimaryStateStr: using primary_table => primary_state_table get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_table
Cluster Name: epprda_cluster
Resource Group Name: epprd_rg
Node                                                             Group State     Delayed Timers
---------------------------------------------------------------- --------------- -------------------
epprda                                                           ONLINE
epprds                                                           OFFLINE
:node_up_complete[277] (( 0 == 0 )) :node_up_complete[279] [[ epprda != epprda ]] :node_up_complete[300] exit 0 Jan 28 2023 18:04:27 EVENT COMPLETED: node_up_complete epprda 0 |2023-01-28T18:04:27|8562|EVENT COMPLETED: node_up_complete epprda 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:04:27.531211 + echo '|2023-01-28T18:04:27.531211|INFO: node_up_complete|epprda|0' + 1>> /var/hacmp/availability/clavailability.log
PowerHA SystemMirror Event Preamble
----------------------------------------------------------------------------
Serial number for this event: 1580
Cluster services started on node 'epprds'
Node Up Completion Event has been enqueued.
----------------------------------------------------------------------------
|EVENT_PREAMBLE_START|TE_JOIN_NODE_DEP|2023-01-28T18:04:33|1580|
|NODE_UP_COMPLETE|
|EVENT_PREAMBLE_END|
Jan 28 2023 18:04:35 EVENT START: node_up epprds |2023-01-28T18:04:35|1580|EVENT START: node_up epprds| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:04:35.354397 + echo '|2023-01-28T18:04:35.354397|INFO: node_up|epprds' + 1>> /var/hacmp/availability/clavailability.log :node_up[182] version=%I% :node_up[185] NODENAME=epprds :node_up[185] export NODENAME :node_up[193] STATUS=0 :node_up[193] typeset -li STATUS :node_up[194] RC=0 :node_up[194] typeset -li RC :node_up[195] ENABLE_NFS_CROSS_MOUNT=false :node_up[196] START_MODE='' :node_up[196] typeset START_MODE :node_up[198] set -u :node_up[200] (( 1 < 1 )) :node_up[200] (( 1 > 2 )) :node_up[207] : serial number for this event is 1580 :node_up[210] [[ epprda == epprds ]] :node_up[219] (( 1 > 1 )) :node_up[256] : If RG_DEPENDENCIES=false, process RGs with clsetenvgrp :node_up[258] [[ TRUE == FALSE ]] :node_up[281] : localnode processing prior to RG acquisition :node_up[283] [[ epprda == epprds ]] :node_up[498] : Enable NFS crossmounts during manual start :node_up[500] [[ -n false ]] :node_up[500] [[ false == true ]] :node_up[607] : When RG dependencies are not configured we call node_up_local/remote, :node_up[608] : followed by process_resources to process any remaining groups :node_up[610] [[ TRUE == FALSE ]] :node_up[657] [[ epprda == epprds ]] :node_up[667] return 0 Jan 28 2023 18:04:35 EVENT COMPLETED: node_up epprds 0 |2023-01-28T18:04:35|1580|EVENT COMPLETED: node_up epprds 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:04:35.478485 + echo '|2023-01-28T18:04:35.478485|INFO: node_up|epprds|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 18:04:38 EVENT START: rg_move_fence epprds 1 |2023-01-28T18:04:38|1581|EVENT START: rg_move_fence epprds 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:04:38.919507 + echo '|2023-01-28T18:04:38.919507|INFO: rg_move_fence|epprd_rg|epprds|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_fence[62] [[ high == high ]] :rg_move_fence[62] version=1.11 :rg_move_fence[63] NODENAME=epprds :rg_move_fence[63] export NODENAME :rg_move_fence[65] set -u :rg_move_fence[67] [ 2 != 2 ] :rg_move_fence[73] set +u :rg_move_fence[75] [[ -z TRUE ]] :rg_move_fence[80] [[ TRUE == TRUE ]] :rg_move_fence[82] LOCAL_NODENAME=epprda :rg_move_fence[83] odmget -qid=1 HACMPgroup :rg_move_fence[83] egrep 'group =' :rg_move_fence[83] awk '{print $3}' :rg_move_fence[83] eval RGNAME='"epprd_rg"' :rg_move_fence[1] RGNAME=epprd_rg +epprd_rg:rg_move_fence[84] GROUPNAME=epprd_rg +epprd_rg:rg_move_fence[85] group_state='$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[86] set +u +epprd_rg:rg_move_fence[87] eval print '$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[1] print ONLINE +epprd_rg:rg_move_fence[87] RG_MOVE_ONLINE=ONLINE +epprd_rg:rg_move_fence[87] export RG_MOVE_ONLINE +epprd_rg:rg_move_fence[88] set -u +epprd_rg:rg_move_fence[89] RG_MOVE_ONLINE=ONLINE +epprd_rg:rg_move_fence[91] set -a +epprd_rg:rg_move_fence[92] clsetenvgrp epprda rg_move epprd_rg '' :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp
completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_fence[92] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[93] RC=0 +epprd_rg:rg_move_fence[94] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_fence[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_fence[3] HOMELESS_GROUPS='' +epprd_rg:rg_move_fence[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_fence[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_fence[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_fence[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_fence[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_fence[8] SIBLING_GROUPS='' +epprd_rg:rg_move_fence[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_fence[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_fence[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[95] set +a +epprd_rg:rg_move_fence[96] [ 0 -ne 0 ] +epprd_rg:rg_move_fence[103] process_resources FENCE :rg_move_fence[3318] version=1.169 :rg_move_fence[3321] STATUS=0 :rg_move_fence[3322] sddsrv_off=FALSE :rg_move_fence[3324] true :rg_move_fence[3326] : call rgpa, and it will tell us what to do next :rg_move_fence[3328] set -a :rg_move_fence[3329] clRGPA FENCE :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa FENCE 2023-01-28T18:04:39.024456 clrgpa :clRGPA[+55] exit 0 :rg_move_fence[3329] eval JOB_TYPE=NONE :rg_move_fence[1] JOB_TYPE=NONE :rg_move_fence[3330] RC=0 :rg_move_fence[3331] set +a :rg_move_fence[3333] (( 0 != 0 )) :rg_move_fence[3342] RESOURCE_GROUPS='' :rg_move_fence[3343] GROUPNAME='' :rg_move_fence[3343] export GROUPNAME :rg_move_fence[3353] IS_SERVICE_START=1 :rg_move_fence[3354] IS_SERVICE_STOP=1 :rg_move_fence[3360] [[ NONE == RELEASE ]] :rg_move_fence[3360] [[ NONE == ONLINE ]] :rg_move_fence[3729] break :rg_move_fence[3740] : If sddsrv was turned off above, turn it back on again :rg_move_fence[3742] [[ FALSE == TRUE ]] :rg_move_fence[3747] exit 0 +epprd_rg:rg_move_fence[104] : exit status of process_resources FENCE is: 0 +epprd_rg:rg_move_fence[107] [[ TRUE == TRUE ]] +epprd_rg:rg_move_fence[109] export EVENT_TYPE +epprd_rg:rg_move_fence[110] echo ACQUIRE_PRIMARY_NFS ACQUIRE_PRIMARY_NFS +epprd_rg:rg_move_fence[111] [[ -n '' ]] +epprd_rg:rg_move_fence[141] exit 0 Jan 28 2023 18:04:39 EVENT COMPLETED: rg_move_fence epprds 1 0 |2023-01-28T18:04:39|1581|EVENT COMPLETED: rg_move_fence epprds 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:04:39.117450 + echo '|2023-01-28T18:04:39.117450|INFO: rg_move_fence|epprd_rg|epprds|1|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 18:04:39 EVENT 
START: rg_move_acquire epprds 1 |2023-01-28T18:04:39|1581|EVENT START: rg_move_acquire epprds 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:04:39.322098 + echo '|2023-01-28T18:04:39.322098|INFO: rg_move_acquire|epprd_rg|epprds|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_acquire[+54] [[ high == high ]] :rg_move_acquire[+54] version=1.9.1.7 :rg_move_acquire[+57] set -u :rg_move_acquire[+59] [ 2 != 2 ] :rg_move_acquire[+65] set +u :rg_move_acquire[+67] :rg_move_acquire[+67] clodmget -n -q id=1 -f group HACMPgroup RG=epprd_rg :rg_move_acquire[+68] export RG :rg_move_acquire[+70] [[ ACQUIRE_PRIMARY_NFS == ACQUIRE_PRIMARY ]] :rg_move_acquire[+118] clcallev rg_move epprds 1 ACQUIRE Jan 28 2023 18:04:39 EVENT START: rg_move epprds 1 ACQUIRE |2023-01-28T18:04:39|1581|EVENT START: rg_move epprds 1 ACQUIRE| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T18:04:39.448267 :clevlog[amlog_trace:320] echo '|2023-01-28T18:04:39.448267|INFO: rg_move|epprd_rg|epprds|1|ACQUIRE' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. :get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move[76] version=%I% :rg_move[86] STATUS=0 :rg_move[88] [[ ! 
-n '' ]] :rg_move[90] EMULATE=REAL :rg_move[96] set -u :rg_move[98] NODENAME=epprds :rg_move[98] export NODENAME :rg_move[99] RGID=1 :rg_move[100] (( 3 == 3 )) :rg_move[102] ACTION=ACQUIRE :rg_move[108] : serial number for this event is 1581 :rg_move[112] RG_UP_POSTEVENT_ON_NODE=epprds :rg_move[112] export RG_UP_POSTEVENT_ON_NODE :rg_move[116] clodmget -qid=1 -f group -n HACMPgroup :rg_move[116] eval RGNAME=epprd_rg :rg_move[1] RGNAME=epprd_rg :rg_move[118] UPDATESTATD=0 :rg_move[119] export UPDATESTATD :rg_move[123] RG_MOVE_EVENT=true :rg_move[123] export RG_MOVE_EVENT :rg_move[128] group_state='$RESGRP_epprd_rg_epprda' :rg_move[129] set +u :rg_move[130] eval print '$RESGRP_epprd_rg_epprda' :rg_move[1] print ONLINE :rg_move[130] RG_MOVE_ONLINE=ONLINE :rg_move[130] export RG_MOVE_ONLINE :rg_move[131] set -u :rg_move[132] RG_MOVE_ONLINE=ONLINE :rg_move[139] rm -f /tmp/.NFSSTOPPED :rg_move[140] rm -f /tmp/.RPCLOCKDSTOPPED :rg_move[147] set -a :rg_move[148] clsetenvgrp epprds rg_move epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprds rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 :rg_move[148] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[149] RC=0 :rg_move[150] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[1] FORCEDOWN_GROUPS='' :rg_move[2] RESOURCE_GROUPS='' :rg_move[3] HOMELESS_GROUPS='' :rg_move[4] HOMELESS_FOLLOWER_GROUPS='' :rg_move[5] ERRSTATE_GROUPS='' :rg_move[6] PRINCIPAL_ACTIONS='' :rg_move[7] ASSOCIATE_ACTIONS='' :rg_move[8] AUXILLIARY_ACTIONS='' :rg_move[8] SIBLING_GROUPS='' :rg_move[9] SIBLING_NODES_BY_GROUP='' :rg_move[10] SIBLING_ACQUIRING_GROUPS='' :rg_move[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' :rg_move[12] SIBLING_RELEASING_GROUPS='' :rg_move[13] SIBLING_RELEASING_NODES_BY_GROUP='' :rg_move[151] set +a :rg_move[155] (( 0 != 0 )) :rg_move[155] [[ -z epprd_rg ]] :rg_move[164] [[ -z TRUE ]] :rg_move[241] AM_SYNC_CALLED_BY=RG_MOVE :rg_move[241] export AM_SYNC_CALLED_BY :rg_move[242] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-01-28T18:04:39.569908 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=NONE :process_resources[1] JOB_TYPE=NONE :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 
)) :process_resources[3342] RESOURCE_GROUPS='' :process_resources[3343] GROUPNAME='' :process_resources[3343] export GROUPNAME :process_resources[3353] IS_SERVICE_START=1 :process_resources[3354] IS_SERVICE_STOP=1 :process_resources[3360] [[ NONE == RELEASE ]] :process_resources[3360] [[ NONE == ONLINE ]] :process_resources[3729] break :process_resources[3740] : If sddsrv was turned off above, turn it back on again :process_resources[3742] [[ FALSE == TRUE ]] :process_resources[3747] exit 0 :rg_move[247] : unsetting AM_SYNC_CALLED_BY from $'callers environment as\n: we dont' require it after this point in execution. :rg_move[250] unset AM_SYNC_CALLED_BY :rg_move[253] [[ -f /tmp/.NFSSTOPPED ]] :rg_move[274] [[ -f /tmp/.RPCLOCKDSTOPPED ]] :rg_move[293] exit 0 Jan 28 2023 18:04:39 EVENT COMPLETED: rg_move epprds 1 ACQUIRE 0 |2023-01-28T18:04:39|1581|EVENT COMPLETED: rg_move epprds 1 ACQUIRE 0| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T18:04:39.702692 :clevlog[amlog_trace:320] echo '|2023-01-28T18:04:39.702692|INFO: rg_move|epprd_rg|epprds|1|ACQUIRE|0' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :rg_move_acquire[+119] exit_status=0 :rg_move_acquire[+120] : exit status of clcallev rg_move epprds 1 ACQUIRE is: 0 :rg_move_acquire[+121] exit 0 Jan 28 2023 18:04:39 EVENT COMPLETED: rg_move_acquire epprds 1 0 |2023-01-28T18:04:39|1581|EVENT COMPLETED: rg_move_acquire epprds 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:04:39.826719 + echo '|2023-01-28T18:04:39.826719|INFO: rg_move_acquire|epprd_rg|epprds|1|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 18:04:39 EVENT START: rg_move_complete epprds 1 |2023-01-28T18:04:39|1581|EVENT START: rg_move_complete epprds 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:04:40.019901 + echo '|2023-01-28T18:04:40.019901|INFO: rg_move_complete|epprd_rg|epprds|1' + 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. :get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move_complete[91] version=%I% :rg_move_complete[97] STATUS=0 :rg_move_complete[97] typeset -li STATUS :rg_move_complete[99] [[ -z '' ]] :rg_move_complete[101] EMULATE=REAL :rg_move_complete[104] set -u :rg_move_complete[106] (( 2 < 2 || 2 > 3 )) :rg_move_complete[112] NODENAME=epprds :rg_move_complete[112] export NODENAME :rg_move_complete[113] RGID=1 :rg_move_complete[114] (( 2 == 3 )) :rg_move_complete[118] RGDESTINATION='' :rg_move_complete[122] : serial number for this event is 1581 :rg_move_complete[126] : Interpret resource group ID into a resource group name. 
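[Annotation] Every rg_move-family script receives the resource group as a numeric ID (here 1) and translates it back to a name through the HACMP ODM; the comment above introduces exactly that step, which rg_move and rg_move_acquire also performed earlier. The idiom, isolated (commands as in the trace):

    RGID=1
    eval RGNAME=$(clodmget -qid=$RGID -f group -n HACMPgroup)    # -> RGNAME=epprd_rg
    GROUPNAME=$RGNAME    # folded into PS4, which is why the trace prefix becomes '+epprd_rg:'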
:rg_move_complete[128] clodmget -qid=1 -f group -n HACMPgroup :rg_move_complete[128] eval RGNAME=epprd_rg :rg_move_complete[1] RGNAME=epprd_rg +epprd_rg:rg_move_complete[129] GROUPNAME=epprd_rg +epprd_rg:rg_move_complete[131] UPDATESTATD=0 +epprd_rg:rg_move_complete[131] typeset -li UPDATESTATD +epprd_rg:rg_move_complete[132] NFSSTOPPED=0 +epprd_rg:rg_move_complete[132] typeset -li NFSSTOPPED +epprd_rg:rg_move_complete[133] LIMIT=60 +epprd_rg:rg_move_complete[133] WAIT=1 +epprd_rg:rg_move_complete[133] TRY=0 +epprd_rg:rg_move_complete[133] typeset -li LIMIT WAIT TRY +epprd_rg:rg_move_complete[136] : If this is a two node cluster and exported filesystems exist, then +epprd_rg:rg_move_complete[137] : when the cluster topology is stable notify rpc.statd of the changes. +epprd_rg:rg_move_complete[139] clnodename +epprd_rg:rg_move_complete[139] wc -l +epprd_rg:rg_move_complete[139] (( 2 == 2 )) +epprd_rg:rg_move_complete[141] clodmget -f group -n HACMPgroup +epprd_rg:rg_move_complete[141] RESOURCE_GROUPS=epprd_rg +epprd_rg:rg_move_complete[144] clodmget -q group='epprd_rg AND name=EXPORT_FILESYSTEM' -f value -n HACMPresource +epprd_rg:rg_move_complete[144] EXPORTLIST=$'/board_org\n/sapmnt/EPP' +epprd_rg:rg_move_complete[146] [[ -n $'/board_org\n/sapmnt/EPP' ]] +epprd_rg:rg_move_complete[146] [[ epprd_rg == epprd_rg ]] +epprd_rg:rg_move_complete[148] UPDATESTATD=1 +epprd_rg:rg_move_complete[149] [[ REAL == EMUL ]] +epprd_rg:rg_move_complete[154] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+189] TWIN_NAME=epprds :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+196] rpcinfo -p :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw status :cl_update_statd(0)[+207] : Get the current twin, if there is one :cl_update_statd(0)[+209] :cl_update_statd(0)[+209] nfso -H sm_gethost :cl_update_statd(0)[+209] 2>& 1 CURTWIN= :cl_update_statd(0)[+210] RC=0 :cl_update_statd(0)[+212] [[ -z true ]] :cl_update_statd(0)[+212] [[ -z epprds ]] :cl_update_statd(0)[+225] : Get the interface to the twin node :cl_update_statd(0)[+227] :cl_update_statd(0)[+227] get_node_ip epprds :cl_update_statd(0)[+9] (( 1 != 1 )) :cl_update_statd(0)[+15] Twin_Name=epprds :cl_update_statd(0)[+16] NewTwin= :cl_update_statd(0)[+19] : Get the Interface details for every interface on the twin node :cl_update_statd(0)[+20] : Reject interfaces on nodes that are not public boot addresses :cl_update_statd(0)[+21] : because those are the only ones we have state information for :cl_update_statd(0)[+23] :cl_update_statd(0)[+23] cllsif -J ~ -Sw -i epprda :cl_update_statd(0)[+23] LC_ALL=C LOCAL_NETWORK_INFO=epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+24] cllsif -J ~ -Sw -i epprds :cl_update_statd(0)[+24] LC_ALL=C :cl_update_statd(0)[+25] read adapt type network net_type attrib node ip_addr skip interface skip netmask skip skip prefix ip_family :cl_update_statd(0)[+25] IFS=~ 
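[Annotation] Unlike the earlier run during node_up_complete, cl_update_statd now has a twin candidate (TWIN_NAME=epprds) and must pick a reachable interface for it. get_node_ip scans the peer's interfaces from cllsif, keeping only public boot adapters, since those are the only ones the cluster manager tracks state for. Sketch of the scan that continues below (field names follow the trace; ksh93 runs the final loop in the current shell):

    cllsif -J '~' -Sw -i epprds | while IFS='~' read adapt type network net_type attrib node \
            ip_addr skip interface skip netmask skip skip prefix ip_family
    do
        [[ $attrib == public && $type == boot ]] || continue   # boot adapters only
        : state lookup and ping test follow, as traced below
    done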
:cl_update_statd(0)[+25] [[ public != public ]] :cl_update_statd(0)[+25] [[ boot != boot ]] :cl_update_statd(0)[+33] : Find the state of this candidate :cl_update_statd(0)[+33] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+37] :cl_update_statd(0)[+37] print 61.81.244.123 :cl_update_statd(0)[+37] tr ./ xx addr=i61x81x244x123_epprds :cl_update_statd(0)[+43] eval candidate_state=${i61x81x244x123_epprds:-down} :cl_update_statd(0)[+43] candidate_state=UP :cl_update_statd(0)[+46] : If state is UP, check to see if this node can talk to it :cl_update_statd(0)[+46] [[ UP == UP ]] :cl_update_statd(0)[+50] ping -w 5 -c 1 -q 61.81.244.123 :cl_update_statd(0)[+50] 1> /dev/null :cl_update_statd(0)[+61] echo epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+61] tr \n :cl_update_statd(0)[+62] read lcl_adapt lcl_type lcl_network lcl_net_type lcl_attrib lcl_node lcl_ip_addr skip lcl_interface skip lcl_netmask skip skip lcl_prefix lcl_ip_family :cl_update_statd(0)[+62] IFS=~ :cl_update_statd(0)[+62] [[ net_ether_01 != net_ether_01 ]] :cl_update_statd(0)[+62] [[ boot != boot ]] :cl_update_statd(0)[+62] [[ public != public ]] :cl_update_statd(0)[+62] [[ AF_INET != AF_INET ]] :cl_update_statd(0)[+62] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+71] :cl_update_statd(0)[+71] print 61.81.244.134 :cl_update_statd(0)[+71] tr ./ xx addr=i61x81x244x134_epprda :cl_update_statd(0)[+77] eval lcl_candidate_state=${i61x81x244x134_epprda:-down} :cl_update_statd(0)[+77] lcl_candidate_state=UP :cl_update_statd(0)[+77] [[ UP == UP ]] :cl_update_statd(0)[+81] : epprds is on the same network as an interface that is up :cl_update_statd(0)[+82] : on the local node, and the attributes match. :cl_update_statd(0)[+84] NewTwin=epprds :cl_update_statd(0)[+85] break :cl_update_statd(0)[+85] [[ -n epprds ]] :cl_update_statd(0)[+91] break :cl_update_statd(0)[+91] [[ -z epprds ]] :cl_update_statd(0)[+100] echo epprds :cl_update_statd(0)[+101] return 0 NEWTWIN=epprds :cl_update_statd(0)[+227] [[ -z epprds ]] :cl_update_statd(0)[+227] [[ epprds != ]] :cl_update_statd(0)[+243] : Need to register a new twin :cl_update_statd(0)[+243] [[ -n ]] :cl_update_statd(0)[+251] : Register our new twin, epprds :cl_update_statd(0)[+253] nfso -H sm_register epprds :cl_update_statd(0)[+254] RC=0 :cl_update_statd(0)[+259] : RC is actually 0 :cl_update_statd(0)[+266] return 0 +epprd_rg:rg_move_complete[155] (( 0 != 0 )) +epprd_rg:rg_move_complete[160] break +epprd_rg:rg_move_complete[166] : Set the RESOURCE_GROUPS environment variable with the names +epprd_rg:rg_move_complete[167] : of all resource groups participating in this event, and export +epprd_rg:rg_move_complete[168] : them to all successive scripts. 
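
The sm_gethost/sm_register exchange traced above is how cl_update_statd keeps rpc.statd pointed at its takeover partner. A sketch of that handshake, with TWIN as a placeholder for the node chosen by get_node_ip:

    TWIN=epprds                          # placeholder; chosen by get_node_ip above
    CURTWIN=$(nfso -H sm_gethost 2>&1)   # current registered twin, if any
    if [[ $CURTWIN != "$TWIN" ]]
    then
        nfso -H sm_register "$TWIN"      # re-point statd notification at the twin
    fi
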
+epprd_rg:rg_move_complete[170] set -a +epprd_rg:rg_move_complete[171] clsetenvgrp epprds rg_move_complete epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprds rg_move_complete epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_complete[171] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_complete[172] RC=0 +epprd_rg:rg_move_complete[173] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_complete[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_complete[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_complete[3] HOMELESS_GROUPS='' +epprd_rg:rg_move_complete[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_complete[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_complete[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_complete[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_complete[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_complete[8] SIBLING_GROUPS='' +epprd_rg:rg_move_complete[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_complete[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_complete[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[174] set +a +epprd_rg:rg_move_complete[175] (( 0 != 0 )) +epprd_rg:rg_move_complete[182] : For each participating resource group, serially process the resources. +epprd_rg:rg_move_complete[251] (( 1 == 1 )) +epprd_rg:rg_move_complete[253] [[ REAL == EMUL ]] +epprd_rg:rg_move_complete[259] stopsrc -s rpc.lockd 0513-044 The rpc.lockd Subsystem was requested to stop. +epprd_rg:rg_move_complete[260] rcstopsrc=0 +epprd_rg:rg_move_complete[261] (( 0 != 0 )) +epprd_rg:rg_move_complete[266] (( TRY=0)) +epprd_rg:rg_move_complete[266] (( 0<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 1<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! 
-z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 2<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 3<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 4<60)) +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z '' ]] +epprd_rg:rg_move_complete[273] break +epprd_rg:rg_move_complete[277] [[ ! -z '' ]] +epprd_rg:rg_move_complete[300] : Sure that rpc.lockd stopped. Restart it. +epprd_rg:rg_move_complete[302] startsrc -s rpc.lockd 0513-059 The rpc.lockd Subsystem has been started. Subsystem PID is 18743688. +epprd_rg:rg_move_complete[303] rcstartsrc=0 +epprd_rg:rg_move_complete[304] (( 0 != 0 )) +epprd_rg:rg_move_complete[365] : If the resource group in this rg_move is now homeless, +epprd_rg:rg_move_complete[366] : then we need to put it into an error state. +epprd_rg:rg_move_complete[368] active_node=0 +epprd_rg:rg_move_complete[428] : If the resource group in this rg_move is now homeless_secondary, +epprd_rg:rg_move_complete[429] : then we need to put it into an errorsecondary state. +epprd_rg:rg_move_complete[437] : Set an error state for concurrent groups that have +epprd_rg:rg_move_complete[438] : been brought offline on this node by rg_move. 
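
The rpc.lockd bounce traced above follows a standard SRC pattern: request the stop, poll lssrc until the subsystem leaves the stopping state or a retry limit is hit, then start it again. A minimal sketch of that loop, assuming ksh93 (where a read at the end of a pipeline runs in the current shell):

    typeset -i LIMIT=60 TRY=0
    stopsrc -s rpc.lockd
    while (( TRY++ < LIMIT ))
    do
        LC_ALL=C lssrc -s rpc.lockd | tail -1 | read name subsystem pid state
        [[ -z $state ]] && break    # empty state column: subsystem is inoperative
        sleep 1
    done
    startsrc -s rpc.lockd
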
+epprd_rg:rg_move_complete[453] AM_SYNC_CALLED_BY=RG_MOVE_COMPLETE +epprd_rg:rg_move_complete[453] export AM_SYNC_CALLED_BY +epprd_rg:rg_move_complete[454] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-01-28T18:04:44.248962 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=NONE :process_resources[1] JOB_TYPE=NONE :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 )) :process_resources[3342] RESOURCE_GROUPS='' :process_resources[3343] GROUPNAME='' :process_resources[3343] export GROUPNAME :process_resources[3353] IS_SERVICE_START=1 :process_resources[3354] IS_SERVICE_STOP=1 :process_resources[3360] [[ NONE == RELEASE ]] :process_resources[3360] [[ NONE == ONLINE ]] :process_resources[3729] break :process_resources[3740] : If sddsrv was turned off above, turn it back on again :process_resources[3742] [[ FALSE == TRUE ]] :process_resources[3747] exit 0 +epprd_rg:rg_move_complete[455] STATUS=0 +epprd_rg:rg_move_complete[456] : The exit status of process_resources is: 0 +epprd_rg:rg_move_complete[461] unset AM_SYNC_CALLED_BY +epprd_rg:rg_move_complete[462] [[ TRUE == TRUE ]] +epprd_rg:rg_move_complete[491] [[ -z '' ]] +epprd_rg:rg_move_complete[493] RESOURCE_GROUPS=epprd_rg +epprd_rg:rg_move_complete[499] GROUPNAME=epprd_rg +epprd_rg:rg_move_complete[499] export GROUPNAME +epprd_rg:rg_move_complete[501] cl_rrmethods2call postrg_move +epprd_rg:cl_rrmethods2call[56] version=%I% +epprd_rg:cl_rrmethods2call[84] RRMETHODS='' +epprd_rg:cl_rrmethods2call[85] NEED_RR_ENV_VARS=no +epprd_rg:cl_rrmethods2call[124] NEED_RR_ENV_VARS=yes +epprd_rg:cl_rrmethods2call[129] : Set the '*_REP_RESOURCE' variables if needed. 
+epprd_rg:cl_rrmethods2call[131] [[ yes == yes ]] +epprd_rg:cl_rrmethods2call[133] cllsres +epprd_rg:cl_rrmethods2call[133] 2> /dev/null +epprd_rg:cl_rrmethods2call[133] eval APPLICATIONS='"epprd_app"' EXPORT_FILESYSTEM='"/board_org' '/sapmnt/EPP"' FILESYSTEM='""' FORCED_VARYON='"false"' FSCHECK_TOOL='"fsck"' FS_BEFORE_IPADDR='"false"' MOUNT_FILESYSTEM='"/board;/board_org"' RECOVERY_METHOD='"sequential"' SERVICE_LABEL='"epprd"' SSA_DISK_FENCING='"false"' VG_AUTO_IMPORT='"false"' VOLUME_GROUP='"datavg"' USERDEFINED_RESOURCES='""' +epprd_rg:cl_rrmethods2call[1] APPLICATIONS=epprd_app +epprd_rg:cl_rrmethods2call[1] EXPORT_FILESYSTEM='/board_org /sapmnt/EPP' +epprd_rg:cl_rrmethods2call[1] FILESYSTEM='' +epprd_rg:cl_rrmethods2call[1] FORCED_VARYON=false +epprd_rg:cl_rrmethods2call[1] FSCHECK_TOOL=fsck +epprd_rg:cl_rrmethods2call[1] FS_BEFORE_IPADDR=false +epprd_rg:cl_rrmethods2call[1] MOUNT_FILESYSTEM='/board;/board_org' +epprd_rg:cl_rrmethods2call[1] RECOVERY_METHOD=sequential +epprd_rg:cl_rrmethods2call[1] SERVICE_LABEL=epprd +epprd_rg:cl_rrmethods2call[1] SSA_DISK_FENCING=false +epprd_rg:cl_rrmethods2call[1] VG_AUTO_IMPORT=false +epprd_rg:cl_rrmethods2call[1] VOLUME_GROUP=datavg +epprd_rg:cl_rrmethods2call[1] USERDEFINED_RESOURCES='' +epprd_rg:cl_rrmethods2call[137] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[142] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[147] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[152] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[157] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[162] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[167] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[172] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[182] [[ -z '' ]] +epprd_rg:cl_rrmethods2call[184] typeset sysmgdata +epprd_rg:cl_rrmethods2call[185] typeset reposmgdata +epprd_rg:cl_rrmethods2call[186] [[ -x /usr/es/sbin/cluster/xd_generic/xd_cli/clxd_list_mg_smit ]] +epprd_rg:cl_rrmethods2call[191] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[191] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[197] echo '' +epprd_rg:cl_rrmethods2call[199] return 0 +epprd_rg:rg_move_complete[501] METHODS='' +epprd_rg:rg_move_complete[516] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. 
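
cl_rrmethods2call, traced above, pulls the group's resource definitions into its environment by evaluating cllsres output, which is a list of NAME="value" assignments. A sketch of the same import, assuming GROUPNAME is exported as it is here:

    export GROUPNAME=epprd_rg
    eval "$(cllsres 2>/dev/null)"    # e.g. VOLUME_GROUP="datavg" SERVICE_LABEL="epprd" ...
    print -- "VG=$VOLUME_GROUP, service label=$SERVICE_LABEL"
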
+epprd_rg:rg_move_complete[518] exit 0
Jan 28 2023 18:04:44 EVENT COMPLETED: rg_move_complete epprds 1 0
|2023-01-28T18:04:44|1581|EVENT COMPLETED: rg_move_complete epprds 1 0|
+ clcycle clavailability.log
+ 1> /dev/null 2>& 1
+ cltime
+ DATE=2023-01-28T18:04:44.370646
+ echo '|2023-01-28T18:04:44.370646|INFO: rg_move_complete|epprd_rg|epprds|1|0'
+ 1>> /var/hacmp/availability/clavailability.log
PowerHA SystemMirror Event Summary
----------------------------------------------------------------------------
Serial number for this event: 1581
Event: TE_RG_MOVE_ACQUIRE
Start time: Sat Jan 28 18:04:38 2023
End time: Sat Jan 28 18:04:44 2023
Action: Resource: Script Name:
----------------------------------------------------------------------------
No resources changed as a result of this event
----------------------------------------------------------------------------
|EVENT_SUMMARY_START|TE_RG_MOVE_ACQUIRE|2023-01-28T18:04:38|2023-01-28T18:04:44|1581|
|EVENT_NO_ACTION|
|EVENT_SUMMARY_END|
PowerHA SystemMirror Event Preamble
----------------------------------------------------------------------------
Serial number for this event: 1581
No resource state change initiated by the cluster manager as a result of this event
----------------------------------------------------------------------------
|EVENT_PREAMBLE_START|TE_JOIN_NODE_DEP_COMPLETE|2023-01-28T18:04:46|1581|
|EVENT_NO_ACTIONS_QUEUED|
|EVENT_PREAMBLE_END|
Jan 28 2023 18:04:46 EVENT START: node_up_complete epprds
|2023-01-28T18:04:46|1581|EVENT START: node_up_complete epprds|
+ clcycle clavailability.log
+ 1> /dev/null 2>& 1
+ cltime
+ DATE=2023-01-28T18:04:46.574153
+ echo '|2023-01-28T18:04:46.574153|INFO: node_up_complete|epprds'
+ 1>> /var/hacmp/availability/clavailability.log
+ version=%I%
+ set -a
+ cllsparam -n epprda
+ eval NODE_NAME=epprda VERBOSE_LOGGING=high PS4=$'\'${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO]' $'\'' DEBUG_LEVEL=Standard LC_ALL=$'\'C\''
+ NODE_NAME=epprda
+ VERBOSE_LOGGING=high
:node_up_complete[1] PS4='${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] '
:node_up_complete[1] DEBUG_LEVEL=Standard
:node_up_complete[1] LC_ALL=C
:node_up_complete[80] set +a
:node_up_complete[82] NODENAME=epprds
:node_up_complete[83] RC=0
:node_up_complete[83] typeset -i RC
:node_up_complete[84] UPDATESTATD=0
:node_up_complete[84] typeset -i UPDATESTATD
:node_up_complete[86] LPM_IN_PROGRESS_DIR=/var/hacmp/.lpm_in_progress
:node_up_complete[86] typeset LPM_IN_PROGRESS_DIR
:node_up_complete[87] LPM_IN_PROGRESS_PREFIX=lpm
:node_up_complete[87] typeset LPM_IN_PROGRESS_PREFIX
:node_up_complete[88] STATE_FILE=/var/hacmp/cl_dr.state
:node_up_complete[88] typeset STATE_FILE
:node_up_complete[97] STATUS=0
:node_up_complete[99] set -u
:node_up_complete[101] (( 1 < 1 ))
:node_up_complete[107] START_MODE=''
:node_up_complete[107] typeset START_MODE
:node_up_complete[108] (( 1 > 1 ))
:node_up_complete[114] : serial number for this event is 1581
:node_up_complete[118] RPCLOCKDSTOPPED=0
:node_up_complete[118] typeset -i RPCLOCKDSTOPPED
:node_up_complete[119] [[ -f /tmp/.RPCLOCKDSTOPPED ]]
:node_up_complete[127] clnodename
:node_up_complete[127] wc -l
:node_up_complete[127] (( 2 == 2 ))
:node_up_complete[129] clodmget -f group -n HACMPgroup
:node_up_complete[129] RESOURCE_GROUPS=epprd_rg
:node_up_complete[132]
clodmget -q group='epprd_rg AND name=EXPORT_FILESYSTEM' -f value -n HACMPresource :node_up_complete[132] EXPORTLIST=$'/board_org\n/sapmnt/EPP' :node_up_complete[133] [[ -n $'/board_org\n/sapmnt/EPP' ]] :node_up_complete[135] UPDATESTATD=1 :node_up_complete[136] [[ epprds == epprda ]] :node_up_complete[146] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+189] TWIN_NAME=epprds :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+196] rpcinfo -p :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw status :cl_update_statd(0)[+207] : Get the current twin, if there is one :cl_update_statd(0)[+209] :cl_update_statd(0)[+209] nfso -H sm_gethost :cl_update_statd(0)[+209] 2>& 1 CURTWIN=epprds :cl_update_statd(0)[+210] RC=0 :cl_update_statd(0)[+212] [[ -z true ]] :cl_update_statd(0)[+212] [[ -z epprds ]] :cl_update_statd(0)[+225] : Get the interface to the twin node :cl_update_statd(0)[+227] :cl_update_statd(0)[+227] get_node_ip epprds :cl_update_statd(0)[+9] (( 1 != 1 )) :cl_update_statd(0)[+15] Twin_Name=epprds :cl_update_statd(0)[+16] NewTwin= :cl_update_statd(0)[+19] : Get the Interface details for every interface on the twin node :cl_update_statd(0)[+20] : Reject interfaces on nodes that are not public boot addresses :cl_update_statd(0)[+21] : because those are the only ones we have state information for :cl_update_statd(0)[+23] :cl_update_statd(0)[+23] cllsif -J ~ -Sw -i epprda :cl_update_statd(0)[+23] LC_ALL=C LOCAL_NETWORK_INFO=epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+24] cllsif -J ~ -Sw -i epprds :cl_update_statd(0)[+24] :cl_update_statd(0)[+25] read adapt type network net_type attrib node ip_addr skip interface skip netmask skip skip prefix ip_family :cl_update_statd(0)[+25] IFS=~ LC_ALL=C :cl_update_statd(0)[+25] [[ public != public ]] :cl_update_statd(0)[+25] [[ boot != boot ]] :cl_update_statd(0)[+33] : Find the state of this candidate :cl_update_statd(0)[+33] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+37] :cl_update_statd(0)[+37] print 61.81.244.123 :cl_update_statd(0)[+37] tr ./ xx addr=i61x81x244x123_epprds :cl_update_statd(0)[+43] eval candidate_state=${i61x81x244x123_epprds:-down} :cl_update_statd(0)[+43] candidate_state=UP :cl_update_statd(0)[+46] : If state is UP, check to see if this node can talk to it :cl_update_statd(0)[+46] [[ UP == UP ]] :cl_update_statd(0)[+50] ping -w 5 -c 1 -q 61.81.244.123 :cl_update_statd(0)[+50] 1> /dev/null :cl_update_statd(0)[+61] echo epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+61] tr \n :cl_update_statd(0)[+62] read lcl_adapt lcl_type lcl_network lcl_net_type lcl_attrib lcl_node lcl_ip_addr skip lcl_interface skip lcl_netmask skip skip lcl_prefix lcl_ip_family :cl_update_statd(0)[+62] IFS=~ :cl_update_statd(0)[+62] [[ 
net_ether_01 != net_ether_01 ]] :cl_update_statd(0)[+62] [[ boot != boot ]] :cl_update_statd(0)[+62] [[ public != public ]] :cl_update_statd(0)[+62] [[ AF_INET != AF_INET ]] :cl_update_statd(0)[+62] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+71] :cl_update_statd(0)[+71] print 61.81.244.134 :cl_update_statd(0)[+71] tr ./ xx addr=i61x81x244x134_epprda :cl_update_statd(0)[+77] eval lcl_candidate_state=${i61x81x244x134_epprda:-down} :cl_update_statd(0)[+77] lcl_candidate_state=UP :cl_update_statd(0)[+77] [[ UP == UP ]] :cl_update_statd(0)[+81] : epprds is on the same network as an interface that is up :cl_update_statd(0)[+82] : on the local node, and the attributes match. :cl_update_statd(0)[+84] NewTwin=epprds :cl_update_statd(0)[+85] break :cl_update_statd(0)[+85] [[ -n epprds ]] :cl_update_statd(0)[+91] break :cl_update_statd(0)[+91] [[ -z epprds ]] :cl_update_statd(0)[+100] echo epprds :cl_update_statd(0)[+101] return 0 NEWTWIN=epprds :cl_update_statd(0)[+227] [[ -z epprds ]] :cl_update_statd(0)[+227] [[ epprds != epprds ]] :cl_update_statd(0)[+259] : RC is actually 0 :cl_update_statd(0)[+266] return 0 :node_up_complete[147] (( 0 )) :node_up_complete[151] break :node_up_complete[156] (( 1 )) :node_up_complete[158] (( 0 )) :node_up_complete[198] [[ TRUE == FALSE ]] :node_up_complete[268] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. :node_up_complete[270] : This is the final clRGinfo output :node_up_complete[272] clRGinfo -p -t :node_up_complete[272] 2>& 1 clRGinfo[431]: version I clRGinfo[517]: Number of resource groups = 0 clRGinfo[562]: cluster epprda_cluster is version = 22 clRGinfo[597]: no resource groups specified on command line - print all clRGinfo[685]: Current group is 'epprd_rg' get primary state info for state 6 get secondary state info for state 6 getPrimaryStateStr: using primary_table => primary_state_table get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_table Cluster Name: epprda_cluster Resource Group Name: epprd_rg Node Group State Delayed Timers ---------------------------------------------------------------- --------------- ------------------- epprda ONLINE epprds OFFLINE :node_up_complete[277] (( 0 == 0 )) :node_up_complete[279] [[ epprds != epprda ]] :node_up_complete[281] grep -w In_progress_file /var/hacmp/cl_dr.state :node_up_complete[281] 2> /dev/null :node_up_complete[281] cut -d= -f2 :node_up_complete[281] lpm_in_progress_file='' :node_up_complete[282] ls '/var/hacmp/.lpm_in_progress/lpm_*' :node_up_complete[282] 2> /dev/null :node_up_complete[282] lpm_in_progress_prefix='' :node_up_complete[283] [[ -n '' ]] :node_up_complete[300] exit 0 Jan 28 2023 18:04:46 EVENT COMPLETED: node_up_complete epprds 0 |2023-01-28T18:04:46|1581|EVENT COMPLETED: node_up_complete epprds 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:04:46.802525 + echo '|2023-01-28T18:04:46.802525|INFO: node_up_complete|epprds|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 18:05:20 EVENT START: admin_op clrm_stop_request 1582 0 |2023-01-28T18:05:20|1582|EVENT START: admin_op clrm_stop_request 1582 0| :admin_op[110] trap sigint_handler INT :admin_op[116] OP_TYPE=clrm_stop_request :admin_op[116] typeset OP_TYPE :admin_op[117] SERIAL=1582 :admin_op[117] typeset -li SERIAL :admin_op[118] INVALID=0 :admin_op[118] typeset -li INVALID The administrator initiated the following action at Sat Jan 28 18:05:20 KORST 2023 Check 
smit.log and clutils.log for additional details.
Stopping PowerHA cluster services on node: epprda in graceful mode...
Jan 28 2023 18:05:20 EVENT COMPLETED: admin_op clrm_stop_request 1582 0 0
|2023-01-28T18:05:20|1582|EVENT COMPLETED: admin_op clrm_stop_request 1582 0 0|
PowerHA SystemMirror Event Preamble
----------------------------------------------------------------------------
Serial number for this event: 1582
Stop cluster services request with 'Graceful' option received for 'epprda'.
Enqueued rg_move release event for resource group epprd_rg.
Node Down Completion Event has been enqueued.
----------------------------------------------------------------------------
|EVENT_PREAMBLE_START|TE_FAIL_NODE_DEP|2023-01-28T18:05:20|1582|
|STOP_CLUSTER_SERVICES|Graceful|epprda|
|CLUSTER_RG_MOVE_RELEASE|epprd_rg|
|NODE_DOWN_COMPLETE|
|EVENT_PREAMBLE_END|
Jan 28 2023 18:05:21 EVENT START: node_down epprda graceful
|2023-01-28T18:05:21|1582|EVENT START: node_down epprda graceful|
+ clcycle clavailability.log
+ 1> /dev/null 2>& 1
+ cltime
+ DATE=2023-01-28T18:05:21.899853
+ echo '|2023-01-28T18:05:21.899853|INFO: node_down|epprda|graceful'
+ 1>> /var/hacmp/availability/clavailability.log
:node_down[64] version=%I%
:node_down[67] NODENAME=epprda
:node_down[67] export NODENAME
:node_down[68] PARAM=graceful
:node_down[68] export PARAM
:node_down[75] STATUS=0
:node_down[75] typeset -li STATUS
:node_down[77] AIX_SHUTDOWN=false
:node_down[79] set -u
:node_down[81] (( 2 < 1 ))
:node_down[87] : serial number for this event is 1582
:node_down[91] : Clean up NFS state tracking
:node_down[93] UPDATESTATDFILE=/usr/es/sbin/cluster/etc/updatestatd
:node_down[94] rm -f /tmp/.RPCLOCKDSTOPPED
:node_down[95] rm -f /usr/es/sbin/cluster/etc/updatestatd
:node_down[96] UPDATESTATD=0
:node_down[97] export UPDATESTATD
:node_down[100] : For RAS debugging, the result of ps -edf is captured at this time
:node_down[102] : begin ps -edf
:node_down[103] ps -edf
UID PID PPID C STIME TTY TIME CMD
root 1 0 0 Nov 16 - 0:01 /etc/init
root 4260170 6095340 0 Nov 16 - 0:00 /usr/sbin/syslogd
root 5046714 1 0 Nov 16 - 0:00 /usr/ccs/bin/shlap64
root 5177846 1 0 Nov 16 - 17:12 /usr/sbin/syncd 60
root 5898680 1 0 Nov 16 - 0:00 /usr/dt/bin/dtlogin -daemon
root 5964246 1 0 Nov 16 - 0:00 /usr/lib/errdemon
root 6029768 6095340 0 16:38:31 - 0:00 /usr/sbin/snmpd
root 6095340 1 0 Nov 16 - 0:00 /usr/sbin/srcmstr
root 6226176 6095340 0 Nov 16 - 0:00 /usr/sbin/inetd
root 6357492 6095340 0 Nov 16 - 0:00 /usr/sbin/portmap
root 6488536 6095340 0 Nov 16 - 0:56 /usr/sbin/xntpd -x
root 6816230 6095340 0 Nov 16 - 0:04 /usr/sbin/hostmibd
root 6881760 6095340 0 Nov 16 - 0:23 sendmail: accepting connections
root 6947294 6095340 0 Nov 16 - 0:04 /usr/sbin/snmpmibd
root 7143710 6095340 0 Nov 16 - 0:22 /usr/sbin/aixmibd
root 7668214 1 0 Nov 16 - 0:11 /usr/sbin/cron
root 7799282 6095340 0 Nov 16 - 1:12 /usr/sbin/aso
daemon 7864678 6095340 0 17:10:55 - 0:00 /usr/sbin/rpc.statd -d 0 -t 50
root 7930136 6095340 0 Nov 16 - 0:00 /usr/sbin/qdaemon
root 8061186 6095340 0 Nov 16 - 0:00 /usr/sbin/biod 6
root 8126748 1 0 Nov 16 - 0:00 /usr/sbin/uprintfd
root 8520102 6095340 0 Nov 16 - 0:00 /usr/sbin/writesrv
root 8585542 6095340 0 Nov 16 - 0:00 /usr/sbin/sshd
root 8913186 6095340 0 Nov 16 - 0:00 /usr/sbin/pfcdaemon
root 13959478 6095340 0 17:00:52 - 0:00 /opt/rsct/bin/rmcd -a IBM.LPCommands -r -S 1500
root 14025136 6095340 0 Nov 16 - 0:00 /usr/sbin/lldpd
root 14090674 6095340 0 Nov 16 - 0:00 /usr/sbin/ecpvdpd
root 14287294 1 0 Nov 16 - 1:26 /usr/bin/topasrec -L -s 300 -R 1 -r 6 -o /var/perf/daily/ -ypersistent=1 -O type=bin -ystart_time=15:11:38,Nov16,2022
root 14352890 6095340 0 Nov 16 - 0:04 /opt/rsct/bin/IBM.MgmtDomainRMd
root 14614984 1 0 15:01:09 - 0:00 /usr/sbin/getty /dev/console
root 14877148 6095340 0 Nov 16 - 0:00 /var/perf/pm/bin/pmperfrec
root 15008234 6095340 0 Nov 16 - 0:00 /opt/rsct/bin/IBM.HostRMd
root 15073556 6095340 0 Nov 16 - 0:00 /opt/rsct/bin/IBM.ServiceRMd
root 15532528 6095340 0 Nov 16 - 0:00 /opt/rsct/bin/IBM.DRMd
root 18088406 8585542 0 16:32:54 - 0:00 sshd: root@pts/5
root 18612628 8585542 0 17:57:26 - 0:00 sshd: root@pts/6
root 18743688 6095340 0 18:04:44 - 0:00 /usr/sbin/rpc.lockd -d 0
root 20054400 6095340 0 18:03:49 - 0:00 /usr/sbin/rpc.mountd
root 20251054 22020420 0 16:48:17 pts/4 0:00 -ksh
root 20447554 8585542 0 16:41:04 - 0:00 sshd: root@pts/7
root 20513024 20447554 0 16:41:07 pts/7 0:00 -ksh
root 20709790 18088406 0 16:32:54 pts/5 0:00 -ksh
root 20972018 6095340 0 17:07:08 - 0:00 /opt/rsct/bin/IBM.ConfigRMd
root 21561614 26411472 0 17:54:30 pts/3 0:00 smitty mknfsexp
root 21823786 18612628 0 17:57:27 pts/6 0:00 -ksh
root 22020420 8585542 0 16:48:14 - 0:00 sshd: root@pts/4
root 22217160 28705070 4 18:05:21 - 0:00 /usr/es/sbin/cluster/events/cmd/clcallev node_down epprda graceful
root 22610296 6095340 0 17:09:04 - 0:00 /opt/rsct/bin/IBM.StorageRMd
root 23003588 1 0 00:00:00 - 0:00 /usr/bin/topas_nmon -f -d -t -s 300 -c 288 -youtput_dir=/ptf/nmon/epprda -ystart_time=00:00:00,Jan28,2023
root 25100574 1 0 0:00
root 25166118 25297194 0 17:40:42 pts/1 0:00 -ksh
root 25297194 8585542 0 17:40:41 - 0:00 sshd: root@pts/1
root 25493908 28836144 0 18:05:21 - 0:00 ps -edf
root 26214850 20513024 0 18:05:15 pts/7 0:00 smitty clstop
root 26411472 27853206 0 17:50:59 pts/3 0:00 -ksh
root 26607896 6095340 0 18:00:30 - 0:00 /usr/es/sbin/cluster/clstrmgr
root 27394550 28311894 0 17:32:07 pts/0 0:00 -ksh
root 27853206 8585542 0 17:50:59 - 0:00 sshd: root@pts/3
root 28180804 13959478 0 17:00:52 - 0:00 [trspoolm]
root 28246396 6095340 0 17:10:21 - 0:00 /usr/sbin/gsclvmd
root 28311894 8585542 0 17:32:06 - 0:00 sshd: root@pts/0
root 28377402 6095340 0 17:10:46 - 0:00 /usr/sbin/nfsd 3891
root 28705070 26607896 0 18:03:33 - 0:00 run_rcovcmd
root 28770708 6095340 0 17:08:42 - 0:00 /usr/sbin/clconfd
root 28836144 22217160 0 18:05:21 - 0:00 /bin/ksh93 /usr/es/sbin/cluster/events/node_down epprda graceful
root 28901670 28246396 0 18:03:34 - 0:00 /usr/sbin/gsclvmd -r 30 -i 300 -t 300 -c 00c44af100004b00000001851e9dc053 -v 0
root 29098468 6095340 0 18:01:55 - 0:00 /usr/sbin/clcomd -d -g
root 29163932 6095340 0 17:08:43 - 0:00 /usr/sbin/rsct/bin/hagsd cthags
:node_down[104] : end ps -edf
:node_down[107] : If RG_DEPENDENCIES is not false, all RG actions are taken via rg_move events.
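
In the local-node branch that follows, node_down checks for volume groups that are defined but no longer varied on. A sketch of that pipeline: build an alternation pattern from the varied-on list, then filter the full list against it (the VG names come from the trace):

    # Any VG in 'lsvg -L' but not in 'lsvg -L -o' is inactive.
    ACTIVE_PAT=$(lsvg -L -o | paste -s -d'|' -)           # e.g. datavg|caavg_private|rootvg
    INACTIVE_VGS=$(lsvg -L | grep -w -v -x -E "$ACTIVE_PAT")
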
:node_down[109] [[ graceful != forced ]]
:node_down[109] [[ TRUE == FALSE ]]
:node_down[207] : Processing specific to the local node
:node_down[209] [[ epprda == epprda ]]
:node_down[212] : Stopping cluster services on epprda with the graceful option
:node_down[214] [[ graceful != forced ]]
:node_down[219] lsvg -L
:node_down[219] lsvg -L -o
:node_down[219] paste -s '-d|' -
:node_down[219] grep -w -v -x -E 'datavg|caavg_private|rootvg'
:node_down[219] INACTIVE_VGS=''
:node_down[222] [[ -n '' ]]
:node_down[272] unset PS4_LOOP
:node_down[276] : update the location DB to indicate this node is going down
:node_down[278] clchdaemons -r -d clstrmgr_scripts -t resource_locator
:node_down[296] [[ -n false ]]
:node_down[296] [[ false == true ]]
:node_down[305] exit 0
Jan 28 2023 18:05:21 EVENT COMPLETED: node_down epprda graceful 0
|2023-01-28T18:05:22|1582|EVENT COMPLETED: node_down epprda graceful 0|
+ clcycle clavailability.log
+ 1> /dev/null 2>& 1
+ cltime
+ DATE=2023-01-28T18:05:22.045578
+ echo '|2023-01-28T18:05:22.045578|INFO: node_down|epprda|graceful|0'
+ 1>> /var/hacmp/availability/clavailability.log
PowerHA SystemMirror Event Preamble
----------------------------------------------------------------------------
Serial number for this event: 1586
Stop cluster services request with 'Graceful' option received for 'epprds'.
Enqueued rg_move release event for resource group epprd_rg.
Node Down Completion Event has been enqueued.
----------------------------------------------------------------------------
|EVENT_PREAMBLE_START|TE_FAIL_NODE_DEP|2023-01-28T18:05:24|1586|
|STOP_CLUSTER_SERVICES|Graceful|epprds|
|CLUSTER_RG_MOVE_RELEASE|epprd_rg|
|NODE_DOWN_COMPLETE|
|EVENT_PREAMBLE_END|
Jan 28 2023 18:05:25 EVENT START: node_down epprds graceful
|2023-01-28T18:05:25|1586|EVENT START: node_down epprds graceful|
+ clcycle clavailability.log
+ 1> /dev/null 2>& 1
+ cltime
+ DATE=2023-01-28T18:05:25.745847
+ echo '|2023-01-28T18:05:25.745847|INFO: node_down|epprds|graceful'
+ 1>> /var/hacmp/availability/clavailability.log
:node_down[64] version=%I%
:node_down[67] NODENAME=epprds
:node_down[67] export NODENAME
:node_down[68] PARAM=graceful
:node_down[68] export PARAM
:node_down[75] STATUS=0
:node_down[75] typeset -li STATUS
:node_down[77] AIX_SHUTDOWN=false
:node_down[79] set -u
:node_down[81] (( 2 < 1 ))
:node_down[87] : serial number for this event is 1586
:node_down[91] : Clean up NFS state tracking
:node_down[93] UPDATESTATDFILE=/usr/es/sbin/cluster/etc/updatestatd
:node_down[94] rm -f /tmp/.RPCLOCKDSTOPPED
:node_down[95] rm -f /usr/es/sbin/cluster/etc/updatestatd
:node_down[96] UPDATESTATD=0
:node_down[97] export UPDATESTATD
:node_down[100] : For RAS debugging, the result of ps -edf is captured at this time
:node_down[102] : begin ps -edf
:node_down[103] ps -edf
UID PID PPID C STIME TTY TIME CMD
root 1 0 0 Nov 16 - 0:01 /etc/init
root 4260170 6095340 0 Nov 16 - 0:00 /usr/sbin/syslogd
root 5046714 1 0 Nov 16 - 0:00 /usr/ccs/bin/shlap64
root 5177846 1 0 Nov 16 - 17:12 /usr/sbin/syncd 60
root 5898680 1 0 Nov 16 - 0:00 /usr/dt/bin/dtlogin -daemon
root 5964246 1 0 Nov 16 - 0:00 /usr/lib/errdemon
root 6029768 6095340 0 16:38:31 - 0:00 /usr/sbin/snmpd
root 6095340 1 0 Nov 16 - 0:00 /usr/sbin/srcmstr
root 6226176 6095340 0 Nov 16 - 0:00 /usr/sbin/inetd
root 6357492 6095340 0 Nov 16 - 0:00 /usr/sbin/portmap
root 6488536 6095340 0 Nov 16 - 0:56 /usr/sbin/xntpd -x
root 6816230 6095340 0 Nov 16 - 0:04 /usr/sbin/hostmibd
root 6881760 6095340 0 Nov 16 - 0:23 sendmail: accepting connections
root 6947294 6095340 0 Nov 16 - 0:04 /usr/sbin/snmpmibd
root 7143710 6095340 0 Nov 16 - 0:22 /usr/sbin/aixmibd
root 7668214 1 0 Nov 16 - 0:11 /usr/sbin/cron
root 7799282 6095340 0 Nov 16 - 1:12 /usr/sbin/aso
daemon 7864678 6095340 0 17:10:55 - 0:00 /usr/sbin/rpc.statd -d 0 -t 50
root 7930136 6095340 0 Nov 16 - 0:00 /usr/sbin/qdaemon
root 8061186 6095340 0 Nov 16 - 0:00 /usr/sbin/biod 6
root 8126748 1 0 Nov 16 - 0:00 /usr/sbin/uprintfd
root 8520102 6095340 0 Nov 16 - 0:00 /usr/sbin/writesrv
root 8585542 6095340 0 Nov 16 - 0:00 /usr/sbin/sshd
root 8913186 6095340 0 Nov 16 - 0:00 /usr/sbin/pfcdaemon
root 13959478 6095340 0 17:00:52 - 0:00 /opt/rsct/bin/rmcd -a IBM.LPCommands -r -S 1500
root 14025136 6095340 0 Nov 16 - 0:00 /usr/sbin/lldpd
root 14090674 6095340 0 Nov 16 - 0:00 /usr/sbin/ecpvdpd
root 14287294 1 0 Nov 16 - 1:26 /usr/bin/topasrec -L -s 300 -R 1 -r 6 -o /var/perf/daily/ -ypersistent=1 -O type=bin -ystart_time=15:11:38,Nov16,2022
root 14352890 6095340 0 Nov 16 - 0:04 /opt/rsct/bin/IBM.MgmtDomainRMd
root 14614984 1 0 15:01:09 - 0:00 /usr/sbin/getty /dev/console
root 14877148 6095340 0 Nov 16 - 0:00 /var/perf/pm/bin/pmperfrec
root 15008234 6095340 0 Nov 16 - 0:00 /opt/rsct/bin/IBM.HostRMd
root 15073556 6095340 0 Nov 16 - 0:00 /opt/rsct/bin/IBM.ServiceRMd
root 15532528 6095340 0 Nov 16 - 0:00 /opt/rsct/bin/IBM.DRMd
root 18088406 8585542 0 16:32:54 - 0:00 sshd: root@pts/5
root 18612628 8585542 0 17:57:26 - 0:00 sshd: root@pts/6
root 18743688 6095340 0 18:04:44 - 0:00 /usr/sbin/rpc.lockd -d 0
root 20054400 6095340 0 18:03:49 - 0:00 /usr/sbin/rpc.mountd
root 20251054 22020420 0 16:48:17 pts/4 0:00 -ksh
root 20447554 8585542 0 16:41:04 - 0:00 sshd: root@pts/7
root 20513024 20447554 0 16:41:07 pts/7 0:00 -ksh
root 20709790 18088406 0 16:32:54 pts/5 0:00 -ksh
root 20972018 6095340 0 17:07:08 - 0:00 /opt/rsct/bin/IBM.ConfigRMd
root 21561614 26411472 0 17:54:30 pts/3 0:00 smitty mknfsexp
root 21823786 18612628 0 17:57:27 pts/6 0:00 -ksh
root 22020420 8585542 0 16:48:14 - 0:00 sshd: root@pts/4
root 22217166 29098468 0 0:00
root 22610296 6095340 0 17:09:04 - 0:00 /opt/rsct/bin/IBM.StorageRMd
root 23003588 1 0 00:00:00 - 0:00 /usr/bin/topas_nmon -f -d -t -s 300 -c 288 -youtput_dir=/ptf/nmon/epprda -ystart_time=00:00:00,Jan28,2023
root 25166118 25297194 0 17:40:42 pts/1 0:00 -ksh
root 25297194 8585542 0 17:40:41 - 0:00 sshd: root@pts/1
root 25362708 27787750 0 18:05:25 - 0:00 /bin/ksh93 /usr/es/sbin/cluster/events/node_down epprds graceful
root 25493954 29098468 0 0:00
root 26214850 20513024 0 18:05:15 pts/7 0:00 smitty clstop
root 26411472 27853206 0 17:50:59 pts/3 0:00 -ksh
root 26607896 6095340 0 18:00:30 - 0:00 /usr/es/sbin/cluster/clstrmgr
root 27394550 28311894 0 17:32:07 pts/0 0:00 -ksh
root 27787750 28705070 4 18:05:25 - 0:00 /usr/es/sbin/cluster/events/cmd/clcallev node_down epprds graceful
root 27853206 8585542 0 17:50:59 - 0:00 sshd: root@pts/3
root 27918684 25362708 0 18:05:25 - 0:00 ps -edf
root 28180804 13959478 0 17:00:52 - 0:00 [trspoolm]
root 28246396 6095340 0 17:10:21 - 0:00 /usr/sbin/gsclvmd
root 28311894 8585542 0 17:32:06 - 0:00 sshd: root@pts/0
root 28377402 6095340 0 17:10:46 - 0:00 /usr/sbin/nfsd 3891
root 28705070 26607896 0 18:03:33 - 0:00 run_rcovcmd
root 28770708 6095340 0 17:08:42 - 0:00 /usr/sbin/clconfd
root 28836148 22217166 0 0:00
root 28901670 28246396 0 18:03:34 - 0:00 /usr/sbin/gsclvmd -r 30 -i 300 -t 300 -c 00c44af100004b00000001851e9dc053 -v 0
root 29098468 6095340 0 18:01:55 - 0:00 /usr/sbin/clcomd -d -g
root 29163932 6095340 1 17:08:43 - 0:00 /usr/sbin/rsct/bin/hagsd cthags
:node_down[104] : end ps -edf :node_down[107] : If RG_DEPENDENCIES is not false, all RG actions are taken via rg_move events. :node_down[109] [[ graceful != forced ]] :node_down[109] [[ TRUE == FALSE ]] :node_down[207] : Processing specific to the local node :node_down[209] [[ epprds == epprda ]] :node_down[284] : epprds, is not the local node, handle fencing for any VGs marked as $'\'CRITICAL\'.' :node_down[286] cl_fence_vg epprds :cl_fence_vg[336] version=%I% :cl_fence_vg[341] : Collect list of disks, for use later :cl_fence_vg[343] lspv :cl_fence_vg[343] lspv_out=$'hdisk0 00c44af155592938 rootvg active \nhdisk1 00c44af11e9e1645 caavg_private active \nhdisk2 00c44af11e8a9c69 datavg concurrent \nhdisk3 00c44af11e8a9cd7 datavg concurrent \nhdisk4 00c44af11e8a9d3c datavg concurrent \nhdisk5 00c44af11e8a9c05 datavg concurrent \nhdisk6 00c44af11e8a9e05 datavg concurrent \nhdisk7 00c44af11e8a9d9f datavg concurrent \nhdisk8 00c44af11e8a9e69 datavg concurrent ' :cl_fence_vg[345] [[ -z epprda ]] :cl_fence_vg[354] : Accept a formal parameter of 'name of node that failed' if none were set :cl_fence_vg[355] : in the environment :cl_fence_vg[357] EVENTNODE=epprds :cl_fence_vg[359] [[ -z epprds ]] :cl_fence_vg[368] : An explicit volume group list can be passed after the name of :cl_fence_vg[369] : the node that failed. Pick up any such :cl_fence_vg[371] shift :cl_fence_vg[372] vg_list='' :cl_fence_vg[374] common_groups='' :cl_fence_vg[375] common_critical_vgs='' :cl_fence_vg[377] [[ -z '' ]] :cl_fence_vg[380] : Find all the concurrent resource groups that contain both epprds and epprda :cl_fence_vg[382] clodmget -q 'startup_pref = OAAN' -f group -n HACMPgroup :cl_fence_vg[424] : Look at each of the resource groups in turn to determine what CRITICAL :cl_fence_vg[425] : volume groups the local node epprda share access with epprds :cl_fence_vg[443] : Process the list of common volume groups, :node_down[296] [[ -n false ]] :node_down[296] [[ false == true ]] :node_down[305] exit 0 Jan 28 2023 18:05:25 EVENT COMPLETED: node_down epprds graceful 0 |2023-01-28T18:05:25|1586|EVENT COMPLETED: node_down epprds graceful 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:05:25.891627 + echo '|2023-01-28T18:05:25.891627|INFO: node_down|epprds|graceful|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 18:05:28 EVENT START: rg_move_release epprda 1 |2023-01-28T18:05:28|1583|EVENT START: rg_move_release epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:05:28.109549 + echo '|2023-01-28T18:05:28.109549|INFO: rg_move_release|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_release[+54] [[ high = high ]] :rg_move_release[+54] version=1.6 :rg_move_release[+56] set -u :rg_move_release[+58] [ 2 != 2 ] :rg_move_release[+64] set +u :rg_move_release[+66] clcallev rg_move epprda 1 RELEASE Jan 28 2023 18:05:28 EVENT START: rg_move epprda 1 RELEASE |2023-01-28T18:05:28|1583|EVENT START: rg_move epprda 1 RELEASE| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T18:05:28.235497 :clevlog[amlog_trace:320] echo '|2023-01-28T18:05:28.235497|INFO: rg_move|epprd_rg|epprda|1|RELEASE' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster 
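
get_local_nodename, traced here as at each event boundary, trusts cllsclstr -N only if the name it returns is a configured cluster node. A sketch of that check:

    ODMDIR=/etc/es/objrepos
    export ODMDIR
    nodename=$(cllsclstr -N) || exit $?      # fails when no cluster is defined
    if clnodename | grep -qw "$nodename"
    then
        print -- "$nodename"
    fi
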
:get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. :get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move[76] version=%I% :rg_move[86] STATUS=0 :rg_move[88] [[ ! -n '' ]] :rg_move[90] EMULATE=REAL :rg_move[96] set -u :rg_move[98] NODENAME=epprda :rg_move[98] export NODENAME :rg_move[99] RGID=1 :rg_move[100] (( 3 == 3 )) :rg_move[102] ACTION=RELEASE :rg_move[108] : serial number for this event is 1583 :rg_move[112] RG_UP_POSTEVENT_ON_NODE=epprda :rg_move[112] export RG_UP_POSTEVENT_ON_NODE :rg_move[116] clodmget -qid=1 -f group -n HACMPgroup :rg_move[116] eval RGNAME=epprd_rg :rg_move[1] RGNAME=epprd_rg :rg_move[118] UPDATESTATD=0 :rg_move[119] export UPDATESTATD :rg_move[123] RG_MOVE_EVENT=true :rg_move[123] export RG_MOVE_EVENT :rg_move[128] group_state='$RESGRP_epprd_rg_epprda' :rg_move[129] set +u :rg_move[130] eval print '$RESGRP_epprd_rg_epprda' :rg_move[1] print ONLINE :rg_move[130] RG_MOVE_ONLINE=ONLINE :rg_move[130] export RG_MOVE_ONLINE :rg_move[131] set -u :rg_move[132] RG_MOVE_ONLINE=ONLINE :rg_move[139] rm -f /tmp/.NFSSTOPPED :rg_move[140] rm -f /tmp/.RPCLOCKDSTOPPED :rg_move[147] set -a :rg_move[148] clsetenvgrp epprda rg_move epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 :rg_move[148] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[149] RC=0 :rg_move[150] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[1] FORCEDOWN_GROUPS='' :rg_move[2] RESOURCE_GROUPS='' :rg_move[3] HOMELESS_GROUPS='' :rg_move[4] HOMELESS_FOLLOWER_GROUPS='' :rg_move[5] ERRSTATE_GROUPS='' :rg_move[6] PRINCIPAL_ACTIONS='' :rg_move[7] ASSOCIATE_ACTIONS='' :rg_move[8] AUXILLIARY_ACTIONS='' :rg_move[8] SIBLING_GROUPS='' :rg_move[9] SIBLING_NODES_BY_GROUP='' :rg_move[10] SIBLING_ACQUIRING_GROUPS='' :rg_move[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' :rg_move[12] SIBLING_RELEASING_GROUPS='' :rg_move[13] SIBLING_RELEASING_NODES_BY_GROUP='' :rg_move[151] set +a :rg_move[155] (( 0 != 0 )) :rg_move[155] [[ -z epprd_rg ]] :rg_move[164] [[ -z TRUE ]] :rg_move[241] AM_SYNC_CALLED_BY=RG_MOVE :rg_move[241] export AM_SYNC_CALLED_BY :rg_move[242] process_resources 
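
process_resources, entered below, is driven entirely by clRGPA: each call returns the next job as shell assignments, which are imported under set -a so they reach child scripts. A sketch of that driver loop's shape:

    while true
    do
        set -a
        eval "$(clRGPA)"              # e.g. JOB_TYPE=RELEASE RESOURCE_GROUPS="epprd_rg" ...
        set +a
        [[ $JOB_TYPE == NONE ]] && break
        : dispatch on "$JOB_TYPE"     # RELEASE, APPLICATIONS, ONLINE, ...
    done
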
:process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-01-28T18:05:28.356391 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=RELEASE RESOURCE_GROUPS='"epprd_rg"' PRINCIPAL_ACTION='"RELEASE"' AUXILLIARY_ACTION='"NONE"' :process_resources[1] JOB_TYPE=RELEASE :process_resources[1] RESOURCE_GROUPS=epprd_rg :process_resources[1] PRINCIPAL_ACTION=RELEASE :process_resources[1] AUXILLIARY_ACTION=NONE :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 )) :process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3363] INFO_STRING='' +epprd_rg:process_resources[3364] clnodename +epprd_rg:process_resources[3373] ENV_VAR=GROUP_epprd_rg_epprda +epprd_rg:process_resources[3374] eval 'echo $GROUP_epprd_rg_epprda' +epprd_rg:process_resources[1] echo ISUPPREEVENT +epprd_rg:process_resources[3374] read ENV_VAR +epprd_rg:process_resources[3375] [[ ISUPPREEVENT == WILLBEUPPOSTEVENT ]] +epprd_rg:process_resources[3379] [[ ISUPPREEVENT == ISUPPREEVENT ]] +epprd_rg:process_resources[3380] INFO_STRING='|SOURCE=epprda' +epprd_rg:process_resources[3381] IS_SERVICE_START=0 +epprd_rg:process_resources[3373] ENV_VAR=GROUP_epprd_rg_epprds +epprd_rg:process_resources[3374] eval 'echo $GROUP_epprd_rg_epprds' +epprd_rg:process_resources[1] echo +epprd_rg:process_resources[3374] read ENV_VAR +epprd_rg:process_resources[3375] [[ '' == WILLBEUPPOSTEVENT ]] +epprd_rg:process_resources[3379] [[ '' == ISUPPREEVENT ]] +epprd_rg:process_resources[3384] (( 0 == 0 && 1 ==0 )) +epprd_rg:process_resources[3660] set_resource_group_state RELEASING +epprd_rg:process_resources[set_resource_group_state:82] PS4_FUNC=set_resource_group_state +epprd_rg:process_resources[set_resource_group_state:82] typeset PS4_FUNC +epprd_rg:process_resources[set_resource_group_state:83] [[ high == high ]] +epprd_rg:process_resources[set_resource_group_state:83] set -x +epprd_rg:process_resources[set_resource_group_state:84] STAT=0 +epprd_rg:process_resources[set_resource_group_state:85] new_status=RELEASING +epprd_rg:process_resources[set_resource_group_state:89] export GROUPNAME +epprd_rg:process_resources[set_resource_group_state:90] [[ RELEASING != DOWN ]] +epprd_rg:process_resources[set_resource_group_state:92] clchdaemons -d clstrmgr_scripts -t resource_locator -n epprda -o epprd_rg -v RELEASING +epprd_rg:process_resources[set_resource_group_state:100] : Resource Manager Updates +epprd_rg:process_resources[set_resource_group_state:111] amlog_trace '' 'acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:318] clcycle clavailability.log +epprd_rg:process_resources[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:process_resources[amlog_trace:319] cltime +epprd_rg:process_resources[amlog_trace:319] DATE=2023-01-28T18:05:28.400998 +epprd_rg:process_resources[amlog_trace:320] echo 
'|2023-01-28T18:05:28.400998|INFO: acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:process_resources[set_resource_group_state:112] cl_RMupdate releasing epprd_rg process_resources 2023-01-28T18:05:28.424814 2023-01-28T18:05:28.429209 +epprd_rg:process_resources[set_resource_group_state:153] return 0 +epprd_rg:process_resources[3661] RC=0 +epprd_rg:process_resources[3662] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:05:28.441071 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=APPLICATIONS ACTION=RELEASE ALL_APPLICATIONS='"epprd_app"' RESOURCE_GROUPS='"epprd_rg' '"' MISCDATA='""' +epprd_rg:process_resources[1] JOB_TYPE=APPLICATIONS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] ALL_APPLICATIONS=epprd_app +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] MISCDATA='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ APPLICATIONS == RELEASE ]] +epprd_rg:process_resources[3360] [[ APPLICATIONS == ONLINE ]] +epprd_rg:process_resources[3549] process_applications RELEASE +epprd_rg:process_resources[process_applications:312] PS4_FUNC=process_applications +epprd_rg:process_resources[process_applications:312] typeset PS4_FUNC +epprd_rg:process_resources[process_applications:313] [[ high == high ]] +epprd_rg:process_resources[process_applications:313] set -x +epprd_rg:process_resources[process_applications:316] : Each subprocess will log to a file with this name and PID +epprd_rg:process_resources[process_applications:318] TMP_FILE=/var/hacmp/log/.process_resources_applications.25362784 +epprd_rg:process_resources[process_applications:318] export TMP_FILE +epprd_rg:process_resources[process_applications:320] rm -f '/var/hacmp/log/.process_resources_applications*' +epprd_rg:process_resources[process_applications:322] WAITPIDS='' +epprd_rg:process_resources[process_applications:323] LPAR_ACQUIRE_FAILED=0 +epprd_rg:process_resources[process_applications:324] LPAR_RELEASE_FAILED=0 +epprd_rg:process_resources[process_applications:325] START_STOP_FAILED=0 +epprd_rg:process_resources[process_applications:326] LIST_OF_APPS=epprd_app +epprd_rg:process_resources[process_applications:329] : Acquire lpar resources in one-shot before starting applications +epprd_rg:process_resources[process_applications:331] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[process_applications:343] (( LPAR_ACQUIRE_FAILED == 0 )) +epprd_rg:process_resources[process_applications:345] : Loop through all groups to start or stop applications +epprd_rg:process_resources[process_applications:348] export GROUPNAME +epprd_rg:process_resources[process_applications:351] : Break out 
application data +epprd_rg:process_resources[process_applications:353] get_list_head epprd_app +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo epprd_app +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo epprd_app +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[process_applications:353] read LIST_OF_APPLICATIONS_FOR_RG +epprd_rg:process_resources[process_applications:354] get_list_tail epprd_app +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo epprd_app +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[process_applications:354] read ALL_APPLICATIONS +epprd_rg:process_resources[process_applications:356] get_list_head +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[process_applications:356] read MISCDATA_FOR_RG +epprd_rg:process_resources[process_applications:357] get_list_tail +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[process_applications:357] read MISCDATA +epprd_rg:process_resources[process_applications:359] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[process_applications:363] TMPLIST='' +epprd_rg:process_resources[process_applications:364] print epprd_app +epprd_rg:process_resources[process_applications:364] set -A appnames epprd_app +epprd_rg:process_resources[process_applications:366] (( cnt=0)) +epprd_rg:process_resources[process_applications:366] (( cnt < 1)) +epprd_rg:process_resources[process_applications:367] TMPLIST='epprd_app ' +epprd_rg:process_resources[process_applications:368] LIST_OF_APPLICATIONS_FOR_RG=epprd_app +epprd_rg:process_resources[process_applications:366] ((cnt++ )) +epprd_rg:process_resources[process_applications:366] (( cnt < 1)) +epprd_rg:process_resources[process_applications:371] LIST_OF_APPLICATIONS_FOR_RG='epprd_app ' +epprd_rg:process_resources[process_applications:374] APPLICATIONS='epprd_app ' 
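
process_applications, traced around this point, stops each group's applications in a background worker and reaps the workers through per-group status files. A sketch of that fan-out shape (the worker name and status-file naming follow the trace; the loop itself is illustrative):

    WAITPIDS=''
    for g in $RESOURCE_GROUPS
    do
        start_or_stop_applications_for_rg RELEASE "/var/hacmp/log/.process_resources_applications.$$.$g" &
        WAITPIDS="$WAITPIDS $!"    # collect worker PIDs
    done
    wait $WAITPIDS                 # then read each status file for 'group rc'
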
+epprd_rg:process_resources[process_applications:374] export APPLICATIONS +epprd_rg:process_resources[process_applications:375] MISC_DATA='' +epprd_rg:process_resources[process_applications:375] export MISC_DATA +epprd_rg:process_resources[process_applications:378] : Now call start_or_stop_applications_for_rg to do the app start/stop. +epprd_rg:process_resources[process_applications:384] : Add PID of the last bg start_or_stop_applications_for_rg process to WAITPIDS. +epprd_rg:process_resources[process_applications:386] WAITPIDS=' 26018290' +epprd_rg:process_resources[process_applications:390] : Wait for the start_or_stop_applications_for_rg PIDs to finish. +epprd_rg:process_resources[process_applications:393] wait 26018290 +epprd_rg:process_resources[process_applications:381] start_or_stop_applications_for_rg RELEASE /var/hacmp/log/.process_resources_applications.25362784.epprd_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:248] PS4_FUNC=start_or_stop_applications_for_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:248] typeset PS4_FUNC +epprd_rg:process_resources[start_or_stop_applications_for_rg:249] [[ high == high ]] +epprd_rg:process_resources[start_or_stop_applications_for_rg:249] set -x +epprd_rg:process_resources[start_or_stop_applications_for_rg:251] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[start_or_stop_applications_for_rg:255] cmd_to_execute=stop_server +epprd_rg:process_resources[start_or_stop_applications_for_rg:259] : File name to store our exit status +epprd_rg:process_resources[start_or_stop_applications_for_rg:261] STATUS_FILE=/var/hacmp/log/.process_resources_applications.25362784.epprd_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:264] : Use clcallev to run the event +epprd_rg:process_resources[start_or_stop_applications_for_rg:266] clcallev stop_server 'epprd_app ' Jan 28 2023 18:05:28 EVENT START: stop_server epprd_app |2023-01-28T18:05:28|1583|EVENT START: stop_server epprd_app | +epprd_rg:stop_server[+59] version=%I% +epprd_rg:stop_server[+62] STATUS=0 +epprd_rg:stop_server[+66] [ ! 
-n ] +epprd_rg:stop_server[+68] EMULATE=REAL +epprd_rg:stop_server[+71] PROC_RES=false +epprd_rg:stop_server[+75] [[ APPLICATIONS != 0 ]] +epprd_rg:stop_server[+75] [[ APPLICATIONS != GROUP ]] +epprd_rg:stop_server[+76] PROC_RES=true +epprd_rg:stop_server[+79] typeset WPARNAME WPARDIR EXEC +epprd_rg:stop_server[+80] WPARDIR= +epprd_rg:stop_server[+81] EXEC= +epprd_rg:stop_server[+83] typeset -i rc=0 +epprd_rg:stop_server[+84] +epprd_rg:stop_server[+84] clwparname epprd_rg +epprd_rg:clwparname[38] version=1.3.1.1 +epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clwparname[44] [[ -z '' ]] +epprd_rg:clwparname[44] exit 0 WPARNAME= +epprd_rg:stop_server[+85] rc=0 +epprd_rg:stop_server[+87] set -u +epprd_rg:stop_server[+90] ALLSERVERS=All_servers +epprd_rg:stop_server[+91] [ REAL = EMUL ] +epprd_rg:stop_server[+96] cl_RMupdate resource_releasing All_servers stop_server 2023-01-28T18:05:28.589877 2023-01-28T18:05:28.594076 +epprd_rg:stop_server[+101] (( 0 == 0 )) +epprd_rg:stop_server[+101] [[ -n ]] +epprd_rg:stop_server[+120] +epprd_rg:stop_server[+120] cllsserv -cn epprd_app +epprd_rg:stop_server[+120] cut -d: -f3 STOP=/etc/hacmp/epprd_stop.sh +epprd_rg:stop_server[+121] +epprd_rg:stop_server[+121] echo /etc/hacmp/epprd_stop.sh +epprd_rg:stop_server[+121] cut -d -f1 STOP_SCRIPT=/etc/hacmp/epprd_stop.sh +epprd_rg:stop_server[+123] PATTERN=epprda epprd_app +epprd_rg:stop_server[+123] [[ -n ]] +epprd_rg:stop_server[+123] [[ -z ]] +epprd_rg:stop_server[+123] [[ -x /etc/hacmp/epprd_stop.sh ]] +epprd_rg:stop_server[+133] [ REAL = EMUL ] +epprd_rg:stop_server[+139] amlog_trace Stopping application controller|epprd_app +epprd_rg:stop_server[+55] clcycle clavailability.log +epprd_rg:stop_server[+55] 1> /dev/null 2>& 1 +epprd_rg:stop_server[+55] +epprd_rg:stop_server[+55] cltime DATE=2023-01-28T18:05:28.628458 +epprd_rg:stop_server[+55] echo |2023-01-28T18:05:28.628458|INFO: Stopping application controller|epprd_app +epprd_rg:stop_server[+55] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:stop_server[+140] /etc/hacmp/epprd_stop.sh +epprd_rg:stop_server[+140] ODMDIR=/etc/objrepos +epprd_rg:stop_server[+141] rc=0 +epprd_rg:stop_server[+143] (( rc != 0 )) +epprd_rg:stop_server[+151] amlog_trace Stopping application controller|epprd_app +epprd_rg:stop_server[+55] clcycle clavailability.log +epprd_rg:stop_server[+55] 1> /dev/null 2>& 1 +epprd_rg:stop_server[+55] +epprd_rg:stop_server[+55] cltime DATE=2023-01-28T18:05:28.657111 +epprd_rg:stop_server[+55] echo |2023-01-28T18:05:28.657111|INFO: Stopping application controller|epprd_app +epprd_rg:stop_server[+55] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:stop_server[+174] ALLNOERRSERV=All_nonerror_servers +epprd_rg:stop_server[+175] [ REAL = EMUL ] +epprd_rg:stop_server[+180] cl_RMupdate resource_down All_nonerror_servers stop_server 2023-01-28T18:05:28.679470 2023-01-28T18:05:28.683714 +epprd_rg:stop_server[+183] exit 0 Jan 28 2023 18:05:28 EVENT COMPLETED: stop_server epprd_app 0 |2023-01-28T18:05:28|1583|EVENT COMPLETED: stop_server epprd_app 0| +epprd_rg:process_resources[start_or_stop_applications_for_rg:267] RC=0 +epprd_rg:process_resources[start_or_stop_applications_for_rg:269] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[start_or_stop_applications_for_rg:279] (( 0 != 0 )) +epprd_rg:process_resources[start_or_stop_applications_for_rg:291] : Store the result for later accumulation +epprd_rg:process_resources[start_or_stop_applications_for_rg:293] print 'epprd_rg 0' 
+epprd_rg:process_resources[start_or_stop_applications_for_rg:293] 1>> /var/hacmp/log/.process_resources_applications.25362784.epprd_rg +epprd_rg:process_resources[process_applications:396] : Look at all the status files to see if any were unsuccessful +epprd_rg:process_resources[process_applications:399] cat /var/hacmp/log/.process_resources_applications.25362784.epprd_rg +epprd_rg:process_resources[process_applications:399] read skip SUCCESS rest +epprd_rg:process_resources[process_applications:401] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:411] rm -f /var/hacmp/log/.process_resources_applications.25362784.epprd_rg +epprd_rg:process_resources[process_applications:416] : Release lpar resources in one-shot now that applications are stopped +epprd_rg:process_resources[process_applications:418] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[process_applications:420] GROUPNAME=epprd_rg +epprd_rg:process_resources[process_applications:420] export GROUPNAME +epprd_rg:process_resources[process_applications:421] clmanageroha -o release -s -l epprd_app +epprd_rg:process_resources[process_applications:421] 3>& 2 +epprd_rg:clmanageroha[318] : version='@(#)' 5881272 43haes/usr/sbin/cluster/events/clmanageroha.sh, 61aha_r726, 2205A_aha726, May 16 2022 12:15 PM +epprd_rg:clmanageroha[321] clodmget -n -f connection_type HACMPhmcparam +epprd_rg:clmanageroha[321] CONN_TYPE=0 +epprd_rg:clmanageroha[321] typeset -i CONN_TYPE +epprd_rg:clmanageroha[323] clodmget -q name='epprda and object like POWERVS_*' -nf name HACMPnode +epprd_rg:clmanageroha[323] 2> /dev/null +epprd_rg:clmanageroha[323] [[ -n '' ]] +epprd_rg:clmanageroha[326] export CONN_TYPE +epprd_rg:clmanageroha[331] roha_session_open -o release -s -l epprd_app +epprd_rg:clmanageroha[roha_session_open:131] roha_session.id=26018296 +epprd_rg:clmanageroha[roha_session_open:132] date +epprd_rg:clmanageroha[roha_session_open:132] LC_ALL=C +epprd_rg:clmanageroha[roha_session_open:132] roha_session_log 'Open session 26018296 at Sat Jan 28 18:05:28 KORST 2023' [ROHALOG:26018296:(0.067)] Open session 26018296 at Sat Jan 28 18:05:28 KORST 2023 +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:146] roha_session.operation=release +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:143] roha_session.systemmirror_mode=1 +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:149] roha_session.optimal_apps=epprd_app +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:163] [[ release != @(acquire|release|adjust) ]] +epprd_rg:clmanageroha[roha_session_open:168] no_roha_apps=0 +epprd_rg:clmanageroha[roha_session_open:168] typeset -i no_roha_apps +epprd_rg:clmanageroha[roha_session_open:169] need_explicit_res_rel=0 +epprd_rg:clmanageroha[roha_session_open:169] typeset -i need_explicit_res_rel +epprd_rg:clmanageroha[roha_session_open:187] [[ -n epprd_app ]] +epprd_rg:clmanageroha[roha_session_open:187] clmgr q roha +epprd_rg:clmanageroha[roha_session_open:187] sort +epprd_rg:clmanageroha[roha_session_open:187] uniq -d +epprd_rg:clmanageroha[roha_session_open:187] echo epprd_app +epprd_rg:clmanageroha[roha_session_open:187] sort -u +epprd_rg:clmanageroha[roha_session_open:187] echo '\nepprd_app' +epprd_rg:clmanageroha[roha_session_open:187] [[ -z '' ]] +epprd_rg:clmanageroha[roha_session_open:189] roha_session_log 'INFO: No 
ROHA configured on applications.\n' [ROHALOG:26018296:(0.517)] INFO: No ROHA configured on applications. [ROHALOG:26018296:(0.517)] +epprd_rg:clmanageroha[roha_session_open:190] no_roha_apps=1 +epprd_rg:clmanageroha[roha_session_open:195] read_tunables +epprd_rg:clmanageroha[roha_session_open:196] echo '' +epprd_rg:clmanageroha[roha_session_open:196] grep -q epprda +epprd_rg:clmanageroha[roha_session_open:197] (( 1 == 0 )) +epprd_rg:clmanageroha[roha_session_open:202] (( 1 == 1 )) +epprd_rg:clmanageroha[roha_session_open:203] roha_session_read_odm_dynresop DLPAR_MEM +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_MEM -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:203] (( 0 == 0.00 )) +epprd_rg:clmanageroha[roha_session_open:204] roha_session_read_odm_dynresop DLPAR_PROCS +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROCS -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:204] (( 0 == 0 )) +epprd_rg:clmanageroha[roha_session_open:205] roha_session_read_odm_dynresop DLPAR_PROC_UNITS +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROC_UNITS -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:205] (( 0 == 0.00 )) +epprd_rg:clmanageroha[roha_session_open:206] roha_session_log 'INFO: Nothing to be done.\n' [ROHALOG:26018296:(0.570)] INFO: Nothing to be done. 
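The release session above takes the early-exit path: clmgr finds no ROHA-configured applications, and the three DLPAR counters saved in HACMPdynresop all read back as zero, so there is nothing to hand back. A minimal sketch of that short-circuit, reusing the clodmget query exactly as traced; the read_saved helper and its default-to-zero behavior are illustrative, not the PowerHA source:

    # Sketch only: the roha_session_open early exit. clodmget reads the
    # value saved under a key in HACMPdynresop; empty means nothing was
    # acquired on the previous acquire pass.
    read_saved()
    {
        typeset out
        out=$(ODMDIR=/etc/es/objrepos clodmget -q key=$1 -nf value HACMPdynresop)
        print -- ${out:-0}          # default to 0 when no value was saved
    }

    if (( $(read_saved DLPAR_MEM) == 0 )) && \
       (( $(read_saved DLPAR_PROCS) == 0 )) && \
       (( $(read_saved DLPAR_PROC_UNITS) == 0 ))
    then
        print 'INFO: Nothing to be done.'
        exit 0                      # close the session without a release
    fi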
[ROHALOG:26018296:(0.570)] +epprd_rg:clmanageroha[roha_session_open:207] exit 0 +epprd_rg:process_resources[process_applications:422] RC=0 +epprd_rg:process_resources[process_applications:423] (( 0 != 0 )) +epprd_rg:process_resources[process_applications:433] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:434] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:435] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:439] return 0 +epprd_rg:process_resources[3550] RC=0 +epprd_rg:process_resources[3551] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3553] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:05:29.359746 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=MOUNT_FILESYSTEMS ACTION=RELEASE FILE_SYSTEMS='"/board;/board_org"' RESOURCE_GROUPS='"epprd_rg' '"' NFS_NETWORKS='""' NFS_HOSTS='""' IP_LABELS='""' +epprd_rg:process_resources[1] JOB_TYPE=MOUNT_FILESYSTEMS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] FILE_SYSTEMS='/board;/board_org' +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] NFS_NETWORKS='' +epprd_rg:process_resources[1] NFS_HOSTS='' +epprd_rg:process_resources[1] IP_LABELS='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ MOUNT_FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources[3360] [[ MOUNT_FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources[3612] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[3616] unmount_nfs_filesystems +epprd_rg:process_resources[unmount_nfs_filesystems:1397] PS4_FUNC=unmount_nfs_filesystems +epprd_rg:process_resources[unmount_nfs_filesystems:1397] typeset PS4_FUNC +epprd_rg:process_resources[unmount_nfs_filesystems:1398] [[ high == high ]] +epprd_rg:process_resources[unmount_nfs_filesystems:1398] set -x +epprd_rg:process_resources[unmount_nfs_filesystems:1400] STAT=0 +epprd_rg:process_resources[unmount_nfs_filesystems:1402] cl_deactivate_nfs +epprd_rg:cl_deactivate_nfs[+75] [[ high == high ]] +epprd_rg:cl_deactivate_nfs[+75] version=1.2.5.1 $Source$ +epprd_rg:cl_deactivate_nfs[+77] STATUS=0 +epprd_rg:cl_deactivate_nfs[+78] PIDLIST= +epprd_rg:cl_deactivate_nfs[+80] set -u +epprd_rg:cl_deactivate_nfs[+154] PROC_RES=false +epprd_rg:cl_deactivate_nfs[+158] [[ MOUNT_FILESYSTEMS != 0 ]] +epprd_rg:cl_deactivate_nfs[+158] [[ MOUNT_FILESYSTEMS != GROUP ]] +epprd_rg:cl_deactivate_nfs[+159] PROC_RES=true +epprd_rg:cl_deactivate_nfs[+175] export GROUPNAME +epprd_rg:cl_deactivate_nfs[+175] [[ true == true ]] +epprd_rg:cl_deactivate_nfs[+178] read UNSORTED_FILELIST +epprd_rg:cl_deactivate_nfs[+178] get_list_head /board;/board_org +epprd_rg:cl_deactivate_nfs[+179] read FILE_SYSTEMS +epprd_rg:cl_deactivate_nfs[+179] get_list_tail 
/board;/board_org +epprd_rg:cl_deactivate_nfs[+186] +epprd_rg:cl_deactivate_nfs[+186] /bin/sort -r +epprd_rg:cl_deactivate_nfs[+186] /bin/echo /board;/board_org FILELIST=/board;/board_org +epprd_rg:cl_deactivate_nfs[+188] grep -q \;/ +epprd_rg:cl_deactivate_nfs[+188] echo /board;/board_org +epprd_rg:cl_deactivate_nfs[+189] CROSSMOUNT=1 +epprd_rg:cl_deactivate_nfs[+189] [[ 1 != 0 ]] +epprd_rg:cl_deactivate_nfs[+194] +epprd_rg:cl_deactivate_nfs[+194] /bin/sort -k 1,1r -t; +epprd_rg:cl_deactivate_nfs[+194] /bin/echo /board;/board_org MNT=/board;/board_org +epprd_rg:cl_deactivate_nfs[+200] ALLNFS=All_nfs_mounts +epprd_rg:cl_deactivate_nfs[+201] cl_RMupdate resource_releasing All_nfs_mounts cl_deactivate_nfs 2023-01-28T18:05:29.409012 2023-01-28T18:05:29.413459 +epprd_rg:cl_deactivate_nfs[+203] +epprd_rg:cl_deactivate_nfs[+203] odmget -q name=RECOVERY_METHOD AND group=epprd_rg HACMPresource +epprd_rg:cl_deactivate_nfs[+203] grep value +epprd_rg:cl_deactivate_nfs[+203] awk {print $3} +epprd_rg:cl_deactivate_nfs[+203] sed s/"//g METHOD=sequential +epprd_rg:cl_deactivate_nfs[+206] typeset PS4_LOOP=/board;/board_org +epprd_rg:cl_deactivate_nfs:/board;/board_org[+207] (( 1 != 0 )) +epprd_rg:cl_deactivate_nfs:/board;/board_org[+209] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+209] cut -f2 -d; +epprd_rg:cl_deactivate_nfs:/board;/board_org[+209] echo /board;/board_org fs=/board_org +epprd_rg:cl_deactivate_nfs:/board;/board_org[+210] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+210] cut -f1 -d; +epprd_rg:cl_deactivate_nfs:/board;/board_org[+210] echo /board;/board_org mnt=/board +epprd_rg:cl_deactivate_nfs:/board;/board_org[+220] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+220] awk -v MFS=/board BEGIN {MFS=sprintf("^%s$", MFS)} \ match($4, "nfs") && match($3, MFS) {print $2} +epprd_rg:cl_deactivate_nfs:/board;/board_org[+220] mount f=/board_org +epprd_rg:cl_deactivate_nfs:/board;/board_org[+220] [[ /board_org == /board_org ]] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+227] pid= +epprd_rg:cl_deactivate_nfs:/board;/board_org[+227] [[ sequential == sequential ]] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+227] [[ rg_move == node_down ]] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+227] [[ rg_move == rg_move ]] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+252] pid=28574130 +epprd_rg:cl_deactivate_nfs:/board;/board_org[+252] [[ -n 28574130 ]] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+251] do_umount /board +epprd_rg:cl_deactivate_nfs(0):/board;/board_org[do_umount+4] typeset fs=/board +epprd_rg:cl_deactivate_nfs(0):/board;/board_org[do_umount+31] cl_nfskill -k -u /board +epprd_rg:cl_deactivate_nfs:/board;/board_org[+264] grep -qw 28574130 +epprd_rg:cl_deactivate_nfs:/board;/board_org[+264] echo +epprd_rg:cl_deactivate_nfs:/board;/board_org[+267] (( 1 != 0 )) +epprd_rg:cl_deactivate_nfs:/board;/board_org[+268] PIDLIST= 28574130 +epprd_rg:cl_deactivate_nfs:/board;/board_org[+274] unset PS4_LOOP +epprd_rg:cl_deactivate_nfs[+279] wait 28574130 +epprd_rg:cl_deactivate_nfs(0):/board;/board_org[do_umount+33] sleep 2 +epprd_rg:cl_deactivate_nfs(2):/board;/board_org[do_umount+34] cl_nfskill -k -u /board +epprd_rg:cl_deactivate_nfs(2):/board;/board_org[do_umount+36] sleep 2 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+39] amlog_trace Deactivating NFS|/board +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] clcycle clavailability.log +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] 1> /dev/null 2>& 1 
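Each crossmount entry above packs the local mount point and the server-side filesystem into one semicolon-separated pair, and the forced unmount runs as a background job whose PID the parent records in PIDLIST and then waits on; the trace of that retry (and of the amlog_trace bracketing it) continues below. A condensed sketch of the pattern, with a plain umount -f retry standing in for the do_umount/cl_nfskill sequence:

    # Illustrative sketch, not the cl_deactivate_nfs source: split each
    # 'local;remote' pair, force-unmount the local side in the background,
    # and wait on the collected PIDs.
    PIDLIST=''
    for pair in '/board;/board_org'
    do
        mnt=${pair%%;*}             # local mount point, e.g. /board
        fs=${pair##*;}              # server-side filesystem, e.g. /board_org
        (
            typeset -i count=20     # bounded retries, as in the trace
            while (( count > 0 ))
            do
                umount -f $mnt && exit 0
                count=$((count - 1))
                sleep 2
            done
            exit 1                  # give up after 20 attempts
        ) &
        PIDLIST="$PIDLIST $!"
    done
    wait $PIDLIST                   # block until every unmount job ends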
+epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] cltime DATE=2023-01-28T18:05:33.462470 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] echo |2023-01-28T18:05:33.462470|INFO: Deactivating NFS|/board +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+40] typeset COUNT=20 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+41] true +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+42] date +%h %d %H:%M:%S.000 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+42] : Attempt 1 of 20 to unmount at Jan 28 18:05:33.000 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+43] umount -f /board forced unmount of /board +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+44] (( 0 != 0 )) +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+61] amlog_trace Deactivating NFS|/board +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] clcycle clavailability.log +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] cltime DATE=2023-01-28T18:05:33.501664 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] echo |2023-01-28T18:05:33.501664|INFO: Deactivating NFS|/board +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+62] break +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+65] return 0 +epprd_rg:cl_deactivate_nfs[+280] (( 0 != 0 )) +epprd_rg:cl_deactivate_nfs[+291] ALLNOERRNFS=All_nonerror_nfs_mounts +epprd_rg:cl_deactivate_nfs[+292] cl_RMupdate resource_down All_nonerror_nfs_mounts cl_deactivate_nfs 2023-01-28T18:05:33.524646 2023-01-28T18:05:33.529117 +epprd_rg:cl_deactivate_nfs[+295] exit 0 +epprd_rg:process_resources[unmount_nfs_filesystems:1403] RC=0 +epprd_rg:process_resources[unmount_nfs_filesystems:1406] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[unmount_nfs_filesystems:1420] (( 0 != 0 )) +epprd_rg:process_resources[unmount_nfs_filesystems:1426] return 0 +epprd_rg:process_resources[3617] RC=0 +epprd_rg:process_resources[3618] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3620] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:05:33.541791 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=EXPORT_FILESYSTEMS ACTION=RELEASE EXPORT_FILE_SYSTEMS='"/board_org,/sapmnt/EPP"' EXPORT_FILE_SYSTEMS_V4='""' RESOURCE_GROUPS='"epprd_rg' '"' STABLE_STORAGE_PATH='""' IP_LABELS='""' DAEMONS='"NFS' '"' +epprd_rg:process_resources[1] JOB_TYPE=EXPORT_FILESYSTEMS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] EXPORT_FILE_SYSTEMS=/board_org,/sapmnt/EPP +epprd_rg:process_resources[1] EXPORT_FILE_SYSTEMS_V4='' +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] 
STABLE_STORAGE_PATH='' +epprd_rg:process_resources[1] IP_LABELS='' +epprd_rg:process_resources[1] DAEMONS='NFS ' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ EXPORT_FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources[3360] [[ EXPORT_FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources[3595] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[3599] unexport_filesystems +epprd_rg:process_resources[unexport_filesystems:1576] PS4_FUNC=unexport_filesystems +epprd_rg:process_resources[unexport_filesystems:1576] typeset PS4_FUNC +epprd_rg:process_resources[unexport_filesystems:1577] [[ high == high ]] +epprd_rg:process_resources[unexport_filesystems:1577] set -x +epprd_rg:process_resources[unexport_filesystems:1578] STAT=0 +epprd_rg:process_resources[unexport_filesystems:1579] NFSSTOPPED=0 +epprd_rg:process_resources[unexport_filesystems:1580] RPCSTOPPED=0 +epprd_rg:process_resources[unexport_filesystems:1582] export NFSSTOPPED +epprd_rg:process_resources[unexport_filesystems:1585] : For NFSv4, cl_unexport_fs will use STABLE_STORAGE_PATH, which is set by +epprd_rg:process_resources[unexport_filesystems:1586] : clRGPA and can have colon-separated values for multiple RGs. +epprd_rg:process_resources[unexport_filesystems:1587] : We will save off clRGPA values in stable_storage_path and then extract +epprd_rg:process_resources[unexport_filesystems:1588] : each RG into STABLE_STORAGE_PATH for cl_unexport_fs. 
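The comment block above describes the convention the sketch below relies on: clRGPA hands back one value per resource group, separated by colons, with commas separating members inside a group, and get_list_head and get_list_tail (traced at length below) peel a single group's value off the front. A condensed, assumed-simplified version:

    # Condensed sketch of the helpers traced below. In ksh the last stage
    # of a pipeline runs in the current shell, so read can set variables
    # that survive the pipe.
    list='/board_org,/sapmnt/EPP'           # this event has a single RG
    echo "$list" | IFS=: read -r head tail  # split first group from the rest
    echo "$head" | tr ',' ' '               # -> '/board_org /sapmnt/EPP'
    echo "$tail"                            # -> '' (no further groups)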
+epprd_rg:process_resources[unexport_filesystems:1590] stable_storage_path='' +epprd_rg:process_resources[unexport_filesystems:1590] typeset stable_storage_path +epprd_rg:process_resources[unexport_filesystems:1594] export GROUPNAME +epprd_rg:process_resources[unexport_filesystems:1596] get_list_head /board_org,/sapmnt/EPP +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[unexport_filesystems:1596] read LIST_OF_EXPORT_FILE_SYSTEMS_FOR_RG +epprd_rg:process_resources[unexport_filesystems:1597] get_list_tail /board_org,/sapmnt/EPP +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[unexport_filesystems:1597] read EXPORT_FILE_SYSTEMS +epprd_rg:process_resources[unexport_filesystems:1599] get_list_head +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[unexport_filesystems:1599] read LIST_OF_EXPORT_FILE_SYSTEMS_V4_FOR_RG +epprd_rg:process_resources[unexport_filesystems:1600] get_list_tail +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[unexport_filesystems:1600] read EXPORT_FILE_SYSTEMS_V4 +epprd_rg:process_resources[unexport_filesystems:1601] get_list_head +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo 
+epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[unexport_filesystems:1601] read STABLE_STORAGE_PATH +epprd_rg:process_resources[unexport_filesystems:1602] get_list_tail +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[unexport_filesystems:1602] read stable_storage_path +epprd_rg:process_resources[unexport_filesystems:1604] cl_unexport_fs '/board_org /sapmnt/EPP' '' +epprd_rg:cl_unexport_fs[136] version=%I% +epprd_rg:cl_unexport_fs[139] . /usr/es/sbin/cluster/events/utils/cl_nfs_utils +epprd_rg:cl_unexport_fs[98] PROGNAME=cl_unexport_fs +epprd_rg:cl_unexport_fs[99] [[ high == high ]] +epprd_rg:cl_unexport_fs[101] set -x +epprd_rg:cl_unexport_fs[102] version=%I +epprd_rg:cl_unexport_fs[105] cl_exports_data='' +epprd_rg:cl_unexport_fs[105] typeset cl_exports_data +epprd_rg:cl_unexport_fs[106] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_unexport_fs[141] UNEXPORT_V3='/board_org /sapmnt/EPP' +epprd_rg:cl_unexport_fs[142] UNEXPORT_V4='' +epprd_rg:cl_unexport_fs[144] STATUS=0 +epprd_rg:cl_unexport_fs[146] PROC_RES=false +epprd_rg:cl_unexport_fs[150] [[ EXPORT_FILESYSTEMS != 0 ]] +epprd_rg:cl_unexport_fs[150] [[ EXPORT_FILESYSTEMS != GROUP ]] +epprd_rg:cl_unexport_fs[151] PROC_RES=true +epprd_rg:cl_unexport_fs[154] set -u +epprd_rg:cl_unexport_fs[156] (( 2 != 2 )) +epprd_rg:cl_unexport_fs[162] [[ __AIX__ == __AIX__ ]] +epprd_rg:cl_unexport_fs[164] oslevel -r +epprd_rg:cl_unexport_fs[164] cut -c1-2 +epprd_rg:cl_unexport_fs[164] (( 72 > 52 )) +epprd_rg:cl_unexport_fs[166] FORCE=-F +epprd_rg:cl_unexport_fs[180] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_unexport_fs[181] DARE_EVENT=reconfig_resource_release +epprd_rg:cl_unexport_fs[184] unexport_v4='' +epprd_rg:cl_unexport_fs[185] [[ -z '' ]] +epprd_rg:cl_unexport_fs[185] [[ rg_move == reconfig_resource_release ]] +epprd_rg:cl_unexport_fs[196] [[ -z '' ]] +epprd_rg:cl_unexport_fs[196] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_unexport_fs[198] unexport_v3='' +epprd_rg:cl_unexport_fs[204] getline_exports /board_org +epprd_rg:cl_unexport_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_unexport_fs[getline_exports:45] line='' +epprd_rg:cl_unexport_fs[getline_exports:45] typeset line +epprd_rg:cl_unexport_fs[getline_exports:46] flag=0 +epprd_rg:cl_unexport_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_unexport_fs[getline_exports:47] fs=/board_org +epprd_rg:cl_unexport_fs[getline_exports:49] [[ -z /board_org ]] +epprd_rg:cl_unexport_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_unexport_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_unexport_fs[getline_exports:56] read -r line +epprd_rg:cl_unexport_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] 
+epprd_rg:cl_unexport_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_unexport_fs[getline_exports:69] (( 1 == 0 )) +epprd_rg:cl_unexport_fs[getline_exports:74] [[ 0 == 1 ]] +epprd_rg:cl_unexport_fs[getline_exports:56] read -r line +epprd_rg:cl_unexport_fs[getline_exports:89] return 0 +epprd_rg:cl_unexport_fs[205] export_line='' +epprd_rg:cl_unexport_fs[210] echo +epprd_rg:cl_unexport_fs[210] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_unexport_fs[211] cut -d- -f2- +epprd_rg:cl_unexport_fs[211] tr , ' ' +epprd_rg:cl_unexport_fs[210] options='' +epprd_rg:cl_unexport_fs[217] vers_missing=1 +epprd_rg:cl_unexport_fs[240] (( vers_missing )) +epprd_rg:cl_unexport_fs[240] unexport_v3=' /board_org' +epprd_rg:cl_unexport_fs[204] getline_exports /sapmnt/EPP +epprd_rg:cl_unexport_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_unexport_fs[getline_exports:45] line='' +epprd_rg:cl_unexport_fs[getline_exports:45] typeset line +epprd_rg:cl_unexport_fs[getline_exports:46] flag=0 +epprd_rg:cl_unexport_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_unexport_fs[getline_exports:47] fs=/sapmnt/EPP +epprd_rg:cl_unexport_fs[getline_exports:49] [[ -z /sapmnt/EPP ]] +epprd_rg:cl_unexport_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_unexport_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_unexport_fs[getline_exports:56] read -r line +epprd_rg:cl_unexport_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_unexport_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:68] grep -q '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_unexport_fs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_unexport_fs[getline_exports:71] flag=1 +epprd_rg:cl_unexport_fs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_unexport_fs[getline_exports:76] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:76] grep -w '\\' +epprd_rg:cl_unexport_fs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_unexport_fs[getline_exports:81] cl_exports_data=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:82] break +epprd_rg:cl_unexport_fs[getline_exports:89] return 0 +epprd_rg:cl_unexport_fs[205] export_line=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[210] echo /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_unexport_fs[210] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_unexport_fs[211] cut -d- -f2- +epprd_rg:cl_unexport_fs[211] tr , ' ' +epprd_rg:cl_unexport_fs[210] options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap root=epprdap ' +epprd_rg:cl_unexport_fs[217] vers_missing=1 +epprd_rg:cl_unexport_fs[240] (( vers_missing )) +epprd_rg:cl_unexport_fs[240] unexport_v3=' /board_org /sapmnt/EPP' +epprd_rg:cl_unexport_fs[243] UNEXPORT_V3=' /board_org /sapmnt/EPP' 
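Both filesystems land on the v3 list above because neither entry in the HA exports file carries a vers= option. The lookup pattern is worth isolating: find the filesystem's line, strip the filesystem field, drop the leading dash, and split the comma-separated options. A hedged sketch; the unexport_v3 accumulation mirrors the trace, while the case test is an illustrative stand-in for the script's version scan:

    # Sketch of the exports-file lookup traced above.
    EXPFILE=/usr/es/sbin/cluster/etc/exports
    fs=/sapmnt/EPP
    export_line=$(grep "^[[:space:]]*${fs}[[:space:]]" $EXPFILE)
    options=$(echo "$export_line" |
        awk '{for (i = 2; i <= NF; i++) printf "%s ", $i}' |
        cut -d- -f2- | tr ',' ' ')
    case " $options" in
    *' vers='*)                         # explicit NFS version list given
        ;;
    *)  unexport_v3="$unexport_v3 $fs"  # no vers option: treat as NFSv3
        ;;
    esac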
+epprd_rg:cl_unexport_fs[244] UNEXPORT_V4='' +epprd_rg:cl_unexport_fs[247] hasrv='' +epprd_rg:cl_unexport_fs[249] [[ -z '' ]] +epprd_rg:cl_unexport_fs[251] query=name='STABLE_STORAGE_PATH AND group=epprd_rg' +epprd_rg:cl_unexport_fs[252] odmget -q name='STABLE_STORAGE_PATH AND group=epprd_rg' HACMPresource +epprd_rg:cl_unexport_fs[252] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_unexport_fs[252] STABLE_STORAGE_PATH='' +epprd_rg:cl_unexport_fs[256] [[ -z '' ]] +epprd_rg:cl_unexport_fs[258] STABLE_STORAGE_PATH=/var/adm/nfsv4.hacmp/epprd_rg +epprd_rg:cl_unexport_fs[261] [[ -z '' ]] +epprd_rg:cl_unexport_fs[263] query=name='SERVICE_LABEL AND group=epprd_rg' +epprd_rg:cl_unexport_fs[264] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_unexport_fs[264] odmget -q name='SERVICE_LABEL AND group=epprd_rg' HACMPresource +epprd_rg:cl_unexport_fs[264] SERVICE_LABEL=epprd +epprd_rg:cl_unexport_fs[268] ps -eo args +epprd_rg:cl_unexport_fs[268] grep -w nfsd +epprd_rg:cl_unexport_fs[268] grep -qw -- '-gp on' +epprd_rg:cl_unexport_fs[272] gp=off +epprd_rg:cl_unexport_fs[275] /usr/sbin/bootinfo -K +epprd_rg:cl_unexport_fs[275] KERNEL_BITS=64 +epprd_rg:cl_unexport_fs[277] [[ off == on ]] +epprd_rg:cl_unexport_fs[282] NFSv4_REGISTERED=0 +epprd_rg:cl_unexport_fs[286] V3=:2:3 +epprd_rg:cl_unexport_fs[287] V4=:4 +epprd_rg:cl_unexport_fs[289] [[ rg_move != reconfig_resource_release ]] +epprd_rg:cl_unexport_fs[290] [[ rg_move != release_vg_fs ]] +epprd_rg:cl_unexport_fs[298] [[ -n '' ]] +epprd_rg:cl_unexport_fs[321] V3='' +epprd_rg:cl_unexport_fs[322] V4='' +epprd_rg:cl_unexport_fs[326] ALLEXPORTS=All_exports +epprd_rg:cl_unexport_fs[328] cl_RMupdate resource_releasing All_exports cl_unexport_fs 2023-01-28T18:05:33.791739 2023-01-28T18:05:33.796007 +epprd_rg:cl_unexport_fs[330] tr ' ' '\n' +epprd_rg:cl_unexport_fs[330] echo /board_org /sapmnt/EPP +epprd_rg:cl_unexport_fs[330] sort +epprd_rg:cl_unexport_fs[330] FILESYSTEM_LIST=$'/board_org\n/sapmnt/EPP' +epprd_rg:cl_unexport_fs[334] v3='' +epprd_rg:cl_unexport_fs[335] v4='' +epprd_rg:cl_unexport_fs[336] root='' +epprd_rg:cl_unexport_fs[337] old_options='' +epprd_rg:cl_unexport_fs[338] new_options='' +epprd_rg:cl_unexport_fs[340] exportfs +epprd_rg:cl_unexport_fs[340] grep '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_unexport_fs[340] export_line='/board_org -root=epprd:epprda:epprds' +epprd_rg:cl_unexport_fs[342] [[ -z '/board_org -root=epprd:epprda:epprds' ]] +epprd_rg:cl_unexport_fs[344] echo /board_org -root=epprd:epprda:epprds +epprd_rg:cl_unexport_fs[344] cut '-d ' -f2- +epprd_rg:cl_unexport_fs[344] cut -d- -f2- +epprd_rg:cl_unexport_fs[344] tr , ' ' +epprd_rg:cl_unexport_fs[344] old_options=root=epprd:epprda:epprds +epprd_rg:cl_unexport_fs[365] new_options=,root=epprd:epprda:epprds +epprd_rg:cl_unexport_fs[371] [[ -z '' ]] +epprd_rg:cl_unexport_fs[371] v3='' +epprd_rg:cl_unexport_fs[377] NFS_VER3='' +epprd_rg:cl_unexport_fs[380] [[ /board_org == /board_org ]] +epprd_rg:cl_unexport_fs[380] v3='' +epprd_rg:cl_unexport_fs[380] NFS_VER3=3 +epprd_rg:cl_unexport_fs[380] break +epprd_rg:cl_unexport_fs[382] NFS_VER4='' +epprd_rg:cl_unexport_fs[387] [[ '' == 4 ]] +epprd_rg:cl_unexport_fs[400] echo '' +epprd_rg:cl_unexport_fs[400] cut -d: -f2- +epprd_rg:cl_unexport_fs[400] vers='' +epprd_rg:cl_unexport_fs[402] [[ -z '' ]] +epprd_rg:cl_unexport_fs[404] [[ '' == 4 ]] +epprd_rg:cl_unexport_fs[408] exportfs -i -u -F /board_org exportfs: unexported /board_org +epprd_rg:cl_unexport_fs[410] (( 0 != 0 )) +epprd_rg:cl_unexport_fs[417] continue 
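The unexport itself is a single exportfs call per filesystem: -u unexports, -i bypasses the options recorded in /etc/exports, and -F forces the operation (enabled earlier only because oslevel -r reported a level above 52); the loop's second pass, for /sapmnt/EPP, follows below. A sketch of the loop with error accounting in the spirit of the trace:

    # Sketch of the per-filesystem unexport step, reusing the flags shown
    # in the trace above.
    STATUS=0
    for fs in /board_org /sapmnt/EPP
    do
        # skip anything that is not currently exported
        exportfs | grep -q "^[[:space:]]*${fs}[[:space:]]" || continue
        exportfs -i -u -F $fs
        if (( $? != 0 ))
        then
            print "ERROR: unexport of $fs failed" >&2
            STATUS=1                # remember the failure, keep going
        fi
    done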
+epprd_rg:cl_unexport_fs[334] v3='' +epprd_rg:cl_unexport_fs[335] v4='' +epprd_rg:cl_unexport_fs[336] root='' +epprd_rg:cl_unexport_fs[337] old_options='' +epprd_rg:cl_unexport_fs[338] new_options='' +epprd_rg:cl_unexport_fs[340] exportfs +epprd_rg:cl_unexport_fs[340] grep '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_unexport_fs[340] export_line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[342] [[ -z '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' ]] +epprd_rg:cl_unexport_fs[344] echo /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_unexport_fs[344] cut '-d ' -f2- +epprd_rg:cl_unexport_fs[344] cut -d- -f2- +epprd_rg:cl_unexport_fs[344] tr , ' ' +epprd_rg:cl_unexport_fs[344] old_options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap root=epprdap' +epprd_rg:cl_unexport_fs[365] new_options=,sec=sys:krb5p:krb5i:krb5:dh +epprd_rg:cl_unexport_fs[365] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw +epprd_rg:cl_unexport_fs[365] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap +epprd_rg:cl_unexport_fs[365] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_unexport_fs[371] [[ -z '' ]] +epprd_rg:cl_unexport_fs[371] v3='' +epprd_rg:cl_unexport_fs[377] NFS_VER3='' +epprd_rg:cl_unexport_fs[380] [[ /sapmnt/EPP == /board_org ]] +epprd_rg:cl_unexport_fs[380] [[ /sapmnt/EPP == /sapmnt/EPP ]] +epprd_rg:cl_unexport_fs[380] v3='' +epprd_rg:cl_unexport_fs[380] NFS_VER3=3 +epprd_rg:cl_unexport_fs[380] break +epprd_rg:cl_unexport_fs[382] NFS_VER4='' +epprd_rg:cl_unexport_fs[387] [[ '' == 4 ]] +epprd_rg:cl_unexport_fs[400] echo '' +epprd_rg:cl_unexport_fs[400] cut -d: -f2- +epprd_rg:cl_unexport_fs[400] vers='' +epprd_rg:cl_unexport_fs[402] [[ -z '' ]] +epprd_rg:cl_unexport_fs[404] [[ '' == 4 ]] +epprd_rg:cl_unexport_fs[408] exportfs -i -u -F /sapmnt/EPP exportfs: unexported /sapmnt/EPP +epprd_rg:cl_unexport_fs[410] (( 0 != 0 )) +epprd_rg:cl_unexport_fs[417] continue +epprd_rg:cl_unexport_fs[452] [[ -n '' ]] +epprd_rg:cl_unexport_fs[480] ALLNOERREXPORT=All_nonerror_exports +epprd_rg:cl_unexport_fs[482] cl_RMupdate resource_down All_nonerror_exports cl_unexport_fs 2023-01-28T18:05:33.877533 2023-01-28T18:05:33.881793 +epprd_rg:cl_unexport_fs[484] exit 0 +epprd_rg:process_resources[unexport_filesystems:1608] return 0 +epprd_rg:process_resources[3600] RC=0 +epprd_rg:process_resources[3601] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3603] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:05:33.895082 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=FILESYSTEMS ACTION=RELEASE FILE_SYSTEMS='"/usr/sap,/sapmnt,/oracle/EPP/sapdata4,/oracle/EPP/sapdata3,/oracle/EPP/sapdata2,/oracle/EPP/sapdata1,/oracle/EPP/origlogB,/oracle/EPP/origlogA,/oracle/EPP/oraarch,/oracle/EPP/mirrlogB,/oracle/EPP/mirrlogA,/oracle/EPP,/oracle,/board_org"' RESOURCE_GROUPS='"epprd_rg' '"' FSCHECK_TOOLS='""' 
RECOVERY_METHODS='"sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential"' +epprd_rg:process_resources[1] JOB_TYPE=FILESYSTEMS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] FILE_SYSTEMS=/usr/sap,/sapmnt,/oracle/EPP/sapdata4,/oracle/EPP/sapdata3,/oracle/EPP/sapdata2,/oracle/EPP/sapdata1,/oracle/EPP/origlogB,/oracle/EPP/origlogA,/oracle/EPP/oraarch,/oracle/EPP/mirrlogB,/oracle/EPP/mirrlogA,/oracle/EPP,/oracle,/board_org +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] FSCHECK_TOOLS='' +epprd_rg:process_resources[1] RECOVERY_METHODS=sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources[3360] [[ FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources[3482] process_file_systems RELEASE +epprd_rg:process_resources[process_file_systems:2640] PS4_FUNC=process_file_systems +epprd_rg:process_resources[process_file_systems:2640] typeset PS4_FUNC +epprd_rg:process_resources[process_file_systems:2641] [[ high == high ]] +epprd_rg:process_resources[process_file_systems:2641] set -x +epprd_rg:process_resources[process_file_systems:2643] STAT=0 +epprd_rg:process_resources[process_file_systems:2645] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[process_file_systems:2667] cl_deactivate_fs +epprd_rg:cl_deactivate_fs[860] version=1.6 +epprd_rg:cl_deactivate_fs[863] STATUS=0 +epprd_rg:cl_deactivate_fs[863] typeset -li STATUS +epprd_rg:cl_deactivate_fs[864] SLEEP=1 +epprd_rg:cl_deactivate_fs[864] typeset -li SLEEP +epprd_rg:cl_deactivate_fs[865] LIMIT=60 +epprd_rg:cl_deactivate_fs[865] typeset -li LIMIT +epprd_rg:cl_deactivate_fs[866] export SLEEP +epprd_rg:cl_deactivate_fs[867] export LIMIT +epprd_rg:cl_deactivate_fs[868] TMP_FILENAME=_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[870] (( 0 != 0 )) +epprd_rg:cl_deactivate_fs[875] OEM_CALL=false +epprd_rg:cl_deactivate_fs[879] : Check here to see if the forced unmount option can be used +epprd_rg:cl_deactivate_fs[881] FORCE_OK='' +epprd_rg:cl_deactivate_fs[881] export FORCE_OK +epprd_rg:cl_deactivate_fs[882] O_FlAG='' +epprd_rg:cl_deactivate_fs[882] export O_FlAG +epprd_rg:cl_deactivate_fs[885] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_deactivate_fs[886] : to allow reliable comparisons. 
E.g., maximum VRMF is +epprd_rg:cl_deactivate_fs[887] : 99.99.999.999 +epprd_rg:cl_deactivate_fs[889] typeset -li V R M F +epprd_rg:cl_deactivate_fs[890] typeset -Z2 R +epprd_rg:cl_deactivate_fs[891] typeset -Z3 M +epprd_rg:cl_deactivate_fs[892] typeset -Z3 F +epprd_rg:cl_deactivate_fs[893] jfs2_lvl=601002000 +epprd_rg:cl_deactivate_fs[893] typeset -li jfs2_lvl +epprd_rg:cl_deactivate_fs[894] fuser_lvl=601004000 +epprd_rg:cl_deactivate_fs[894] typeset -li fuser_lvl +epprd_rg:cl_deactivate_fs[895] VRMF=0 +epprd_rg:cl_deactivate_fs[895] typeset -li VRMF +epprd_rg:cl_deactivate_fs[898] : Here try and figure out what level of JFS2 is installed +epprd_rg:cl_deactivate_fs[900] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_deactivate_fs[900] cut -f3 -d: +epprd_rg:cl_deactivate_fs[900] read V R M F +epprd_rg:cl_deactivate_fs[900] IFS=. +epprd_rg:cl_deactivate_fs[901] VRMF=702005102 +epprd_rg:cl_deactivate_fs[903] (( 702005102 >= 601002000 )) +epprd_rg:cl_deactivate_fs[906] : JFS2 at this level that supports forced unmount +epprd_rg:cl_deactivate_fs[908] FORCE_OK=true +epprd_rg:cl_deactivate_fs[911] (( 702005102 >= 601004000 )) +epprd_rg:cl_deactivate_fs[914] : fuser at this level supports the -O flag +epprd_rg:cl_deactivate_fs[916] O_FLAG=-O +epprd_rg:cl_deactivate_fs[920] : if JOB_TYPE is set and is not GROUP, then process_resources is parent +epprd_rg:cl_deactivate_fs[922] [[ FILESYSTEMS != 0 ]] +epprd_rg:cl_deactivate_fs[922] [[ FILESYSTEMS != GROUP ]] +epprd_rg:cl_deactivate_fs[923] deactivate_fs_process_resources +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:705] STATUS=0 +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:705] typeset -li STATUS +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:708] : for the temp file, just take the first rg name +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:710] print epprd_rg +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:710] cut -f 1 -d ' ' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:710] read RES_GRP +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:711] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:714] : Remove the status file if already exists +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:716] rm -f /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:719] : go through all resource groups +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:721] pid_list='' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:724] export GROUPNAME +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:725] export RECOVERY_METHOD +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:728] : Get a reverse sorted list of the filesystems in this RG so that they +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:729] : release in opposite order of mounting. This is needed for nested mounts. 
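Two details above deserve a note. First, the VRMF comparison: padding R to two digits and M and F to three (typeset -Z2/-Z3) turns an installed bos.rte.filesystem level such as 7.2.5.102 into the single integer 702005102, so plain arithmetic tests decide whether JFS2 supports forced unmount and whether fuser accepts -O. Second, the trace appears to declare and export O_FlAG (lowercase l) while later assigning O_FLAG=-O, so the spelling exported at the top never receives the value. The comment just above states the ordering rule the sketch below illustrates: release filesystems in the reverse of mount order so nested mounts never leave a busy parent behind (find_nested_mounts, traced at length below, additionally picks up anything mounted on top of the listed filesystems):

    # Illustrative only: a reverse lexicographic sort ('sort -ru' in the
    # trace) emits child mounts such as /oracle/EPP/sapdata1 before their
    # parents /oracle/EPP and /oracle, so unmounts run deepest-first.
    FILE_SYSTEMS='/usr/sap,/sapmnt,/oracle/EPP/sapdata1,/oracle/EPP,/oracle,/board_org'
    echo "$FILE_SYSTEMS" | tr ',' '\n' | sort -ru |
    while read -r fs
    do
        print "would unmount $fs"   # children come out before parents
    done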
+epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:731] print /usr/sap,/sapmnt,/oracle/EPP/sapdata4,/oracle/EPP/sapdata3,/oracle/EPP/sapdata2,/oracle/EPP/sapdata1,/oracle/EPP/origlogB,/oracle/EPP/origlogA,/oracle/EPP/oraarch,/oracle/EPP/mirrlogB,/oracle/EPP/mirrlogA,/oracle/EPP,/oracle,/board_org +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:731] read LIST_OF_FILE_SYSTEMS_FOR_RG FILE_SYSTEMS +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:731] IFS=: +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:732] print /usr/sap,/sapmnt,/oracle/EPP/sapdata4,/oracle/EPP/sapdata3,/oracle/EPP/sapdata2,/oracle/EPP/sapdata1,/oracle/EPP/origlogB,/oracle/EPP/origlogA,/oracle/EPP/oraarch,/oracle/EPP/mirrlogB,/oracle/EPP/mirrlogA,/oracle/EPP,/oracle,/board_org +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:732] tr , '\n' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:732] sort -ru +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:732] LIST_OF_FILE_SYSTEMS_FOR_RG=$'/usr/sap\n/sapmnt\n/oracle/EPP/sapdata4\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata1\n/oracle/EPP/origlogB\n/oracle/EPP/origlogA\n/oracle/EPP/oraarch\n/oracle/EPP/mirrlogB\n/oracle/EPP/mirrlogA\n/oracle/EPP\n/oracle\n/board_org' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:733] find_nested_mounts $'/usr/sap\n/sapmnt\n/oracle/EPP/sapdata4\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata1\n/oracle/EPP/origlogB\n/oracle/EPP/origlogA\n/oracle/EPP/oraarch\n/oracle/EPP/mirrlogB\n/oracle/EPP/mirrlogA\n/oracle/EPP\n/oracle\n/board_org' +epprd_rg:cl_deactivate_fs[find_nested_mounts:88] given_fs_list=$'/usr/sap\n/sapmnt\n/oracle/EPP/sapdata4\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata1\n/oracle/EPP/origlogB\n/oracle/EPP/origlogA\n/oracle/EPP/oraarch\n/oracle/EPP/mirrlogB\n/oracle/EPP/mirrlogA\n/oracle/EPP\n/oracle\n/board_org' +epprd_rg:cl_deactivate_fs[find_nested_mounts:88] typeset given_fs_list +epprd_rg:cl_deactivate_fs[find_nested_mounts:90] typeset first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:91] mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:91] mount_out=$' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 
rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:91] typeset mount_out +epprd_rg:cl_deactivate_fs[find_nested_mounts:92] discovered_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:92] typeset discovered_fs +epprd_rg:cl_deactivate_fs[find_nested_mounts:93] typeset line fs nested_fs +epprd_rg:cl_deactivate_fs[find_nested_mounts:94] typeset mounted_fs_list +epprd_rg:cl_deactivate_fs[find_nested_mounts:96] fs_count=0 +epprd_rg:cl_deactivate_fs[find_nested_mounts:96] typeset -li fs_count +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /usr/sap +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/saplv /usr/sap jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/saplv /usr/sap jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 
/ jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /sapmnt +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/sapmntlv /sapmnt jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/sapmntlv /sapmnt jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n 
/dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv 
/oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 
rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraclelv 
/oracle jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' 
/dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 
rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=$' /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- $' /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 10' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 10 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] echo $' /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv 
/oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:128] [[ jfs2 == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n '' ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/mirrlogA == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/mirrlogAlv /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/mirrlogA ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/mirrlogB == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
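The pattern repeated in the trace above (find_nested_mounts:100-102) is the nesting test: for each file system in the resource group, the full mount output is filtered with grep -w for the mount point and the surviving rows are counted. Because '/' is not a word character, grep -w /oracle/EPP also matches /oracle/EPP/sapdata1 and the other deeper paths, so a count of 1 means the file system stands alone, while a count above 1 (10 rows for /oracle/EPP above, 11 for /oracle further on) means something is mounted underneath and each row must be examined. A minimal ksh sketch of that check, reconstructed from the trace rather than taken from the shipped script (fs is a stand-in name):

    # Reconstructed from the trace at find_nested_mounts:100-102; illustrative only.
    fs=/oracle/EPP
    mounted_fs_list=$(mount | grep -w $fs)              # rows naming the mount point
    fs_count=$(print -- "$mounted_fs_list" | wc -l)     # 1 == the file system alone
    if (( fs_count > 1 ))
    then
        :   # something is mounted below $fs; the read loop inspects each row
    fi

Note that the leading whitespace wc -l leaves in fs_count (' 1', ' 10') is harmless inside the arithmetic comparison.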
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/mirrlogBlv /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/mirrlogB ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/oraarch == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/oraarchlv /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/oraarch ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/origlogA == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/origlogAlv /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/origlogA ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/origlogB == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/origlogBlv /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/origlogB ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata1 == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata1lv /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata1 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata2 == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata2lv /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata2 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata3 == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata3lv /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata3 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata4 == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata4lv /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata4 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 
rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=$' /dev/oraclelv /oracle jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- $' /dev/oraclelv /oracle jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 11' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 11 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] echo $' /dev/oraclelv /oracle jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] 
read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/oraclelv /oracle jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:128] [[ jfs2 == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n '' ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/epplv /oracle/EPP +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/mirrlogA == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
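Inside the read loop (find_nested_mounts:104-143), each surviving row is split into fields and classified by its shape: a local JFS/JFS2 row reads lv_name mount_point vfs ..., while an NFS row carries a leading node name that shifts the mount point to the third field, which is why a second test probes the third field (the [[ jfs2 == /oracle/EPP/* ]] and [[ jfs2 == /oracle/* ]] lines above). The mount point itself fails the $fs/* prefix test, as seen for /dev/epplv and /dev/oraclelv, so only strictly deeper mounts are recorded. An approximate ksh rendering of that classification, inferred from the traced branch tests and not verbatim source:

    # Inferred from the [[ ... ]] tests at find_nested_mounts:118-143.
    fs=/oracle/EPP
    discovered_fs=''
    line='/dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv'
    print "$line" | read first second third fourth rest
    nested_fs=''
    if [[ $second == $fs/* ]] && [[ $third == jfs* ]]   # local row: lv mount_point vfs ...
    then
        nested_fs=$second
    elif [[ $third == $fs/* ]]                          # NFS row: node remote_fs mount_point ...
    then
        nested_fs=$third
    fi
    [[ -n $nested_fs ]] && discovered_fs="$discovered_fs $nested_fs"

The print-into-read idiom works here because ksh runs the last component of a pipeline in the current shell, so the field variables survive the pipe, exactly as the trace shows.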
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/mirrlogAlv /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/mirrlogA ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/mirrlogB == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/mirrlogBlv /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/mirrlogB ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/oraarch == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/oraarchlv /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/oraarch ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/origlogA == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/origlogAlv /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/origlogA ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/origlogB == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/origlogBlv /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/origlogB ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata1 == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata1lv /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata1 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata2 == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata2lv /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata2 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata3 == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata3lv /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata3 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata4 == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata4lv /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata4 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /board_org +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/boardlv /board_org jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/boardlv /board_org jfs2 Jan 28 18:03 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:150] : Pass comprehensive list to stdout, sorted to get correct unmount order +epprd_rg:cl_deactivate_fs[find_nested_mounts:152] print -- 
$'/usr/sap\n/sapmnt\n/oracle/EPP/sapdata4\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata1\n/oracle/EPP/origlogB\n/oracle/EPP/origlogA\n/oracle/EPP/oraarch\n/oracle/EPP/mirrlogB\n/oracle/EPP/mirrlogA\n/oracle/EPP\n/oracle\n/board_org' ' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[find_nested_mounts:152] sort -ru +epprd_rg:cl_deactivate_fs[find_nested_mounts:152] tr ' ' '\n' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:733] LIST_OF_FILE_SYSTEMS_FOR_RG=$'/usr/sap\n/sapmnt\n/oracle/EPP/sapdata4\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata1\n/oracle/EPP/origlogB\n/oracle/EPP/origlogA\n/oracle/EPP/oraarch\n/oracle/EPP/mirrlogB\n/oracle/EPP/mirrlogA\n/oracle/EPP\n/oracle\n/board_org' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:736] : Get the recovery method used for all filesystems in this resource group +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:738] print sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:738] read RECOVERY_METHOD RECOVERY_METHODS +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:738] IFS=: +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:739] cut -f 1 -d , +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:739] print sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:739] RECOVERY_METHOD=sequential +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:742] : verify the recovery method +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:744] RECOVERY_METHOD=sequential +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:745] RECOVERY_METHOD=sequential +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:747] [[ sequential != sequential ]] +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:754] : Tell the cluster manager what we are going to do +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:756] ALLFS=All_filesystems +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:757] cl_RMupdate resource_releasing All_filesystems cl_deactivate_fs 2023-01-28T18:05:34.185594 2023-01-28T18:05:34.189883 +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:760] : now that all variables are set, perform the umounts +epprd_rg:cl_deactivate_fs:/usr/sap[deactivate_fs_process_resources:764] PS4_LOOP=/usr/sap +epprd_rg:cl_deactivate_fs:/usr/sap[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/usr/sap[deactivate_fs_process_resources:770] fs_umount /usr/sap cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.284)[fs_umount:313] FS=/usr/sap +epprd_rg:cl_deactivate_fs(0.284)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(0.284)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(0.284)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(0.284)[fs_umount:315] 
TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.284)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(0.284)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(0.304)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(0.304)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(0.304)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(0.304)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(0.304)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(0.304)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(0.304)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(0.304)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(0.304)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(0.304)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(0.304)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(0.304)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(0.304)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(0.304)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(0.304)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(0.304)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(0.304)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(0.305)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(0.307)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/usr/sap +epprd_rg:cl_deactivate_fs(0.311)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(0.311)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(0.311)[fs_umount:365] : 
Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(0.311)[fs_umount:367] lsfs -c /usr/sap +epprd_rg:cl_deactivate_fs(0.314)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/usr/sap:/dev/saplv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(0.314)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(0.315)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/usr/sap:/dev/saplv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(0.317)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(0.319)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(0.319)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(0.319)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(0.319)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(0.319)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(0.320)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(0.321)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(0.322)[fs_umount:394] awk '{ if ( $1 == "/dev/saplv" ) print $2 }' +epprd_rg:cl_deactivate_fs(0.327)[fs_umount:394] FS_MOUNTED=/usr/sap +epprd_rg:cl_deactivate_fs(0.327)[fs_umount:395] [[ -n /usr/sap ]] +epprd_rg:cl_deactivate_fs(0.327)[fs_umount:397] [[ /usr/sap != /usr/sap ]] +epprd_rg:cl_deactivate_fs(0.327)[fs_umount:409] [[ /usr/sap == / ]] +epprd_rg:cl_deactivate_fs(0.327)[fs_umount:409] [[ /usr/sap == /usr ]] +epprd_rg:cl_deactivate_fs(0.327)[fs_umount:409] [[ /usr/sap == /dev ]] +epprd_rg:cl_deactivate_fs(0.327)[fs_umount:409] [[ /usr/sap == /proc ]] +epprd_rg:cl_deactivate_fs(0.327)[fs_umount:409] [[ /usr/sap == /var ]] +epprd_rg:cl_deactivate_fs(0.327)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/usr/sap' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:34.262475 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:34.262475|INFO: Deactivating Filesystem|/usr/sap' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(0.356)[fs_umount:427] : Try up to 60 times to unmount /usr/sap +epprd_rg:cl_deactivate_fs(0.356)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(0.356)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(0.356)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(0.359)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:05:34.000 +epprd_rg:cl_deactivate_fs(0.359)[fs_umount:434] umount /usr/sap +epprd_rg:cl_deactivate_fs(0.427)[fs_umount:437] : Unmount of /usr/sap worked. Can stop now. 
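The unmount order used above comes from the tr/sort pipeline in find_nested_mounts: because a nested mount point always carries its parent's path as a string prefix, a reverse lexicographic sort lists children before their parents, and -u drops the duplicate entries that discovery produced. A minimal sketch of the trick, using paths taken from this trace:

    # Reverse-unique sort puts /oracle/EPP/sapdata1 ahead of /oracle/EPP,
    # and /oracle/EPP ahead of /oracle, so umounts run children-first.
    fs_list='/oracle /oracle/EPP /oracle/EPP/sapdata1 /oracle/EPP/sapdata1'
    print -- $fs_list | tr ' ' '\n' | sort -ru
    # /oracle/EPP/sapdata1
    # /oracle/EPP
    # /oracle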
+epprd_rg:cl_deactivate_fs(0.427)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(0.427)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(0.427)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/usr/sap' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:34.362745 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:34.362745|INFO: Deactivating Filesystem|/usr/sap' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(0.456)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(0.456)[fs_umount:687] print -- 0 /dev/saplv /usr/sap +epprd_rg:cl_deactivate_fs(0.457)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.457)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/sapmnt[deactivate_fs_process_resources:764] PS4_LOOP=/sapmnt +epprd_rg:cl_deactivate_fs:/sapmnt[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/sapmnt[deactivate_fs_process_resources:770] fs_umount /sapmnt cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.457)[fs_umount:313] FS=/sapmnt +epprd_rg:cl_deactivate_fs(0.457)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(0.457)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(0.457)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(0.457)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.457)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(0.457)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(0.477)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(0.477)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(0.477)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(0.477)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(0.477)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(0.477)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(0.477)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(0.477)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(0.477)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(0.477)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(0.477)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(0.477)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(0.477)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(0.477)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(0.477)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(0.477)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(0.477)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(0.479)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/sapmnt +epprd_rg:cl_deactivate_fs(0.479)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(0.484)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(0.484)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(0.484)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(0.484)[fs_umount:367] lsfs -c /sapmnt +epprd_rg:cl_deactivate_fs(0.487)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/sapmnt:/dev/sapmntlv:jfs2:::20971520:rw:no:no' +epprd_rg:cl_deactivate_fs(0.487)[fs_umount:382] : Get the 
logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(0.488)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/sapmnt:/dev/sapmntlv:jfs2:::20971520:rw:no:no' +epprd_rg:cl_deactivate_fs(0.489)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(0.488)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(0.489)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(0.491)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(0.491)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(0.491)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(0.493)[fs_umount:394] awk '{ if ( $1 == "/dev/sapmntlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(0.493)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(0.493)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(0.497)[fs_umount:394] FS_MOUNTED=/sapmnt +epprd_rg:cl_deactivate_fs(0.497)[fs_umount:395] [[ -n /sapmnt ]] +epprd_rg:cl_deactivate_fs(0.497)[fs_umount:397] [[ /sapmnt != /sapmnt ]] +epprd_rg:cl_deactivate_fs(0.497)[fs_umount:409] [[ /sapmnt == / ]] +epprd_rg:cl_deactivate_fs(0.497)[fs_umount:409] [[ /sapmnt == /usr ]] +epprd_rg:cl_deactivate_fs(0.497)[fs_umount:409] [[ /sapmnt == /dev ]] +epprd_rg:cl_deactivate_fs(0.497)[fs_umount:409] [[ /sapmnt == /proc ]] +epprd_rg:cl_deactivate_fs(0.497)[fs_umount:409] [[ /sapmnt == /var ]] +epprd_rg:cl_deactivate_fs(0.497)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/sapmnt' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:34.431926 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:34.431926|INFO: Deactivating Filesystem|/sapmnt' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(0.526)[fs_umount:427] : Try up to 60 times to unmount /sapmnt +epprd_rg:cl_deactivate_fs(0.526)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(0.526)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(0.526)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(0.529)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:05:34.000 +epprd_rg:cl_deactivate_fs(0.529)[fs_umount:434] umount /sapmnt +epprd_rg:cl_deactivate_fs(0.597)[fs_umount:437] : Unmount of /sapmnt worked. Can stop now. 
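Every fs_umount call in this trace wraps umount in the same retry loop (fs_umount:427-439): up to 60 attempts, a no-op `:` line tracing the attempt number, and a break on the first attempt that returns zero. A minimal sketch of that pattern, assuming a plain sleep between failed attempts; whatever recovery the real script performs between retries is not shown here, since every unmount in this trace succeeds on attempt 1:

    typeset -i count
    for (( count=1; count<=60; count++ ))
    do
        : Attempt $count of 60 to unmount at $(date '+%h %d %H:%M:%S.000')
        umount /usr/sap && break   # first success ends the loop
        sleep 2                    # assumed back-off, not shown in the trace
    done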
+epprd_rg:cl_deactivate_fs(0.597)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(0.597)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(0.597)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/sapmnt' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:34.532342 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:34.532342|INFO: Deactivating Filesystem|/sapmnt' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(0.626)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(0.626)[fs_umount:687] print -- 0 /dev/sapmntlv /sapmnt +epprd_rg:cl_deactivate_fs(0.626)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.626)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata4[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata4[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata4[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/sapdata4 cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.626)[fs_umount:313] FS=/oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(0.626)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(0.626)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(0.626)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(0.626)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.626)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(0.626)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(0.647)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(0.647)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(0.647)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(0.647)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(0.647)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(0.647)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(0.647)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(0.647)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(0.647)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(0.647)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(0.647)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(0.647)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(0.647)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(0.647)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(0.647)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(0.647)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(0.647)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(0.649)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(0.649)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(0.653)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(0.653)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(0.653)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(0.653)[fs_umount:367] lsfs -c /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(0.656)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata4:/dev/sapdata4lv:jfs2:::104857600:rw:no:no' 
+epprd_rg:cl_deactivate_fs(0.656)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(0.657)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata4:/dev/sapdata4lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(0.659)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(0.658)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(0.659)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(0.660)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(0.660)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(0.660)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(0.662)[fs_umount:394] awk '{ if ( $1 == "/dev/sapdata4lv" ) print $2 }' +epprd_rg:cl_deactivate_fs(0.662)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(0.662)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(0.666)[fs_umount:394] FS_MOUNTED=/oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(0.666)[fs_umount:395] [[ -n /oracle/EPP/sapdata4 ]] +epprd_rg:cl_deactivate_fs(0.667)[fs_umount:397] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:cl_deactivate_fs(0.667)[fs_umount:409] [[ /oracle/EPP/sapdata4 == / ]] +epprd_rg:cl_deactivate_fs(0.667)[fs_umount:409] [[ /oracle/EPP/sapdata4 == /usr ]] +epprd_rg:cl_deactivate_fs(0.667)[fs_umount:409] [[ /oracle/EPP/sapdata4 == /dev ]] +epprd_rg:cl_deactivate_fs(0.667)[fs_umount:409] [[ /oracle/EPP/sapdata4 == /proc ]] +epprd_rg:cl_deactivate_fs(0.667)[fs_umount:409] [[ /oracle/EPP/sapdata4 == /var ]] +epprd_rg:cl_deactivate_fs(0.667)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:34.601613 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:34.601613|INFO: Deactivating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(0.695)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(0.695)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(0.695)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(0.695)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(0.698)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:05:34.000 +epprd_rg:cl_deactivate_fs(0.698)[fs_umount:434] umount /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(0.766)[fs_umount:437] : Unmount of /oracle/EPP/sapdata4 worked. Can stop now. 
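Before each unmount, fs_umount maps the mount point to its logical volume with lsfs -c, whose output is a comment header plus one colon-delimited record; tail -1 keeps the record and an IFS=: read splits it, exactly as at fs_umount:367-384. A minimal standalone sketch of the same idiom:

    # lsfs -c emits '#MountPoint:Device:Vfs:...' then one data record.
    # In ksh the last pipeline stage runs in the current shell, so the
    # variables set by read survive the pipe; 'rest' absorbs the
    # remaining size/options/automount/accounting fields.
    lsfs -c /oracle/EPP/sapdata4 | tail -1 | IFS=: read skip lv fs_type rest
    print -- "$lv"       # /dev/sapdata4lv
    print -- "$fs_type"  # jfs2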
+epprd_rg:cl_deactivate_fs(0.766)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(0.766)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(0.766)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:34.701707 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:34.701707|INFO: Deactivating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(0.795)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(0.795)[fs_umount:687] print -- 0 /dev/sapdata4lv /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(0.795)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.796)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata3[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata3[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata3[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/sapdata3 cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.796)[fs_umount:313] FS=/oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(0.796)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(0.796)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(0.796)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(0.796)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.796)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(0.796)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(0.816)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(0.816)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(0.816)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(0.816)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(0.816)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(0.816)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(0.816)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(0.816)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(0.816)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(0.816)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(0.816)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(0.816)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(0.816)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(0.816)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(0.816)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(0.816)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(0.816)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(0.818)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(0.818)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(0.822)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(0.822)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(0.822)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(0.822)[fs_umount:367] lsfs -c /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(0.826)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata3:/dev/sapdata3lv:jfs2:::104857600:rw:no:no' 
+epprd_rg:cl_deactivate_fs(0.826)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(0.827)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata3:/dev/sapdata3lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(0.827)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(0.827)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(0.827)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(0.829)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(0.830)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(0.830)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(0.831)[fs_umount:394] awk '{ if ( $1 == "/dev/sapdata3lv" ) print $2 }' +epprd_rg:cl_deactivate_fs(0.831)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(0.831)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(0.836)[fs_umount:394] FS_MOUNTED=/oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(0.836)[fs_umount:395] [[ -n /oracle/EPP/sapdata3 ]] +epprd_rg:cl_deactivate_fs(0.836)[fs_umount:397] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:cl_deactivate_fs(0.836)[fs_umount:409] [[ /oracle/EPP/sapdata3 == / ]] +epprd_rg:cl_deactivate_fs(0.836)[fs_umount:409] [[ /oracle/EPP/sapdata3 == /usr ]] +epprd_rg:cl_deactivate_fs(0.836)[fs_umount:409] [[ /oracle/EPP/sapdata3 == /dev ]] +epprd_rg:cl_deactivate_fs(0.836)[fs_umount:409] [[ /oracle/EPP/sapdata3 == /proc ]] +epprd_rg:cl_deactivate_fs(0.836)[fs_umount:409] [[ /oracle/EPP/sapdata3 == /var ]] +epprd_rg:cl_deactivate_fs(0.836)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:34.770846 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:34.770846|INFO: Deactivating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(0.865)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(0.865)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(0.865)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(0.865)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(0.867)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:05:34.000 +epprd_rg:cl_deactivate_fs(0.867)[fs_umount:434] umount /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(0.936)[fs_umount:437] : Unmount of /oracle/EPP/sapdata3 worked. Can stop now. 
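Each unmount is bracketed by a pair of amlog_trace calls that append pipe-delimited records to /var/hacmp/availability/clavailability.log, so the two timestamps bound the elapsed time of the operation. A minimal sketch of pulling such a pair back out, assuming only the record layout visible in the echo commands above:

    # Each record looks like:
    # |2023-01-28T18:05:34.770846|INFO: Deactivating Filesystem|/oracle/EPP/sapdata3
    # so field 2 of a pipe-split is the timestamp.
    grep 'Deactivating Filesystem|/oracle/EPP/sapdata3' \
        /var/hacmp/availability/clavailability.log | cut -d'|' -f2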
+epprd_rg:cl_deactivate_fs(0.936)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(0.936)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(0.936)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:34.871260 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:34.871260|INFO: Deactivating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(0.965)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(0.965)[fs_umount:687] print -- 0 /dev/sapdata3lv /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(0.965)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.965)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata2[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata2[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata2[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/sapdata2 cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.965)[fs_umount:313] FS=/oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(0.965)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(0.965)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(0.965)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(0.965)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.965)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(0.965)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(0.986)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(0.986)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(0.986)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(0.986)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(0.986)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(0.986)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(0.986)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(0.986)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(0.986)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(0.986)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(0.986)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(0.986)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(0.986)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(0.986)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(0.986)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(0.986)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(0.986)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(0.988)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(0.988)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(0.992)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(0.992)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(0.992)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(0.992)[fs_umount:367] lsfs -c /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(0.995)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata2:/dev/sapdata2lv:jfs2:::104857600:rw:no:no' 
+epprd_rg:cl_deactivate_fs(0.995)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(0.996)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata2:/dev/sapdata2lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(0.997)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(0.997)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(0.997)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(0.999)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(0.999)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(0.999)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(1.001)[fs_umount:394] awk '{ if ( $1 == "/dev/sapdata2lv" ) print $2 }' +epprd_rg:cl_deactivate_fs(1.001)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(1.001)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(1.005)[fs_umount:394] FS_MOUNTED=/oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(1.005)[fs_umount:395] [[ -n /oracle/EPP/sapdata2 ]] +epprd_rg:cl_deactivate_fs(1.005)[fs_umount:397] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:cl_deactivate_fs(1.005)[fs_umount:409] [[ /oracle/EPP/sapdata2 == / ]] +epprd_rg:cl_deactivate_fs(1.005)[fs_umount:409] [[ /oracle/EPP/sapdata2 == /usr ]] +epprd_rg:cl_deactivate_fs(1.005)[fs_umount:409] [[ /oracle/EPP/sapdata2 == /dev ]] +epprd_rg:cl_deactivate_fs(1.005)[fs_umount:409] [[ /oracle/EPP/sapdata2 == /proc ]] +epprd_rg:cl_deactivate_fs(1.005)[fs_umount:409] [[ /oracle/EPP/sapdata2 == /var ]] +epprd_rg:cl_deactivate_fs(1.005)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:34.940436 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:34.940436|INFO: Deactivating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.034)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(1.034)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(1.034)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(1.034)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(1.037)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:05:34.000 +epprd_rg:cl_deactivate_fs(1.037)[fs_umount:434] umount /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(1.105)[fs_umount:437] : Unmount of /oracle/EPP/sapdata2 worked. Can stop now. 
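After each unmount, fs_umount:687 appends a record of the form "rc device mount_point" to /tmp/epprd_rg_deactivate_fs.tmp. How cl_deactivate_fs later consumes that status file is outside this trace; a minimal sketch of scanning it for failures, assuming only that record layout:

    # Any record with a nonzero leading return code marks a filesystem
    # whose unmount did not complete.
    while read rc lv fs
    do
        (( rc != 0 )) && print -u2 "unmount of $fs ($lv) failed with rc=$rc"
    done < /tmp/epprd_rg_deactivate_fs.tmp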
+epprd_rg:cl_deactivate_fs(1.105)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(1.105)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(1.105)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:35.040603 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:35.040603|INFO: Deactivating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.134)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(1.134)[fs_umount:687] print -- 0 /dev/sapdata2lv /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(1.134)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.134)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata1[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata1[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata1[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/sapdata1 cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.135)[fs_umount:313] FS=/oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(1.135)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(1.135)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(1.135)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(1.135)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.135)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(1.135)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(1.155)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(1.155)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(1.155)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(1.155)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(1.155)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(1.155)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(1.155)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(1.155)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(1.155)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(1.155)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(1.155)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(1.155)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(1.155)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(1.155)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(1.155)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(1.155)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(1.155)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(1.157)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(1.157)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(1.161)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(1.161)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(1.161)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(1.161)[fs_umount:367] lsfs -c /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(1.164)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata1:/dev/sapdata1lv:jfs2:::104857600:rw:no:no' 
+epprd_rg:cl_deactivate_fs(1.165)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(1.166)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata1:/dev/sapdata1lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(1.166)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(1.166)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(1.167)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(1.169)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(1.169)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(1.169)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(1.170)[fs_umount:394] awk '{ if ( $1 == "/dev/sapdata1lv" ) print $2 }' +epprd_rg:cl_deactivate_fs(1.170)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(1.170)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(1.175)[fs_umount:394] FS_MOUNTED=/oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(1.175)[fs_umount:395] [[ -n /oracle/EPP/sapdata1 ]] +epprd_rg:cl_deactivate_fs(1.175)[fs_umount:397] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:cl_deactivate_fs(1.175)[fs_umount:409] [[ /oracle/EPP/sapdata1 == / ]] +epprd_rg:cl_deactivate_fs(1.175)[fs_umount:409] [[ /oracle/EPP/sapdata1 == /usr ]] +epprd_rg:cl_deactivate_fs(1.175)[fs_umount:409] [[ /oracle/EPP/sapdata1 == /dev ]] +epprd_rg:cl_deactivate_fs(1.175)[fs_umount:409] [[ /oracle/EPP/sapdata1 == /proc ]] +epprd_rg:cl_deactivate_fs(1.175)[fs_umount:409] [[ /oracle/EPP/sapdata1 == /var ]] +epprd_rg:cl_deactivate_fs(1.175)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:35.109977 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:35.109977|INFO: Deactivating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.204)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(1.204)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(1.204)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(1.204)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(1.207)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:05:35.000 +epprd_rg:cl_deactivate_fs(1.207)[fs_umount:434] umount /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(1.275)[fs_umount:437] : Unmount of /oracle/EPP/sapdata1 worked. Can stop now. 
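The clwparroot block that repeats before every fs_umount is a WPAR probe: loadWparName reads WPAR_NAME from the HACMPresource ODM class, and the empty result here means the resource group runs in the global environment, so WPAR_ROOT stays empty and mount points are used unprefixed. The check reduces to:

    # Taken from the clwparroot[54] / loadWparName:1490 lines above.
    wparName=$(clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource)
    [[ -z $wparName ]] && : no WPAR, leave WPAR_ROOT empty and paths as-is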
+epprd_rg:cl_deactivate_fs(1.275)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(1.275)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(1.275)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:35.210532 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:35.210532|INFO: Deactivating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.304)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(1.304)[fs_umount:687] print -- 0 /dev/sapdata1lv /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(1.304)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.304)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogB[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogB[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogB[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/origlogB cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.304)[fs_umount:313] FS=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(1.304)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(1.304)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(1.305)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(1.305)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.305)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(1.305)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(1.325)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(1.325)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(1.325)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(1.325)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(1.325)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(1.325)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(1.325)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(1.325)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(1.325)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(1.325)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(1.325)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(1.325)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(1.325)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(1.325)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(1.325)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(1.325)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(1.325)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(1.327)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(1.327)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(1.331)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(1.331)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(1.331)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(1.331)[fs_umount:367] lsfs -c /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(1.334)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogB:/dev/origlogBlv:jfs2:::10485760:rw:no:no' 
+epprd_rg:cl_deactivate_fs(1.335)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(1.336)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogB:/dev/origlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(1.336)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(1.336)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(1.337)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(1.338)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(1.339)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(1.339)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(1.340)[fs_umount:394] awk '{ if ( $1 == "/dev/origlogBlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(1.340)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(1.340)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(1.345)[fs_umount:394] FS_MOUNTED=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(1.345)[fs_umount:395] [[ -n /oracle/EPP/origlogB ]] +epprd_rg:cl_deactivate_fs(1.345)[fs_umount:397] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:cl_deactivate_fs(1.345)[fs_umount:409] [[ /oracle/EPP/origlogB == / ]] +epprd_rg:cl_deactivate_fs(1.345)[fs_umount:409] [[ /oracle/EPP/origlogB == /usr ]] +epprd_rg:cl_deactivate_fs(1.345)[fs_umount:409] [[ /oracle/EPP/origlogB == /dev ]] +epprd_rg:cl_deactivate_fs(1.345)[fs_umount:409] [[ /oracle/EPP/origlogB == /proc ]] +epprd_rg:cl_deactivate_fs(1.345)[fs_umount:409] [[ /oracle/EPP/origlogB == /var ]] +epprd_rg:cl_deactivate_fs(1.345)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:35.279758 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:35.279758|INFO: Deactivating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.373)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(1.374)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(1.374)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(1.374)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(1.376)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:05:35.000 +epprd_rg:cl_deactivate_fs(1.376)[fs_umount:434] umount /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(1.444)[fs_umount:437] : Unmount of /oracle/EPP/origlogB worked. Can stop now. 
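The first thing fs_umount does (line 332) is probe whether the filesystem is an NFS crossmount. In AIX mount output a local jfs2 row has the mount point in column 2, while a remote row carries the node name in column 1, pushing the mount point to column 3 and the vfs type (nfs3 and similar) to column 4; the awk therefore matches only remote rows. Every probe in this trace comes back empty, so the local unmount path is taken. A minimal sketch:

    fs=/oracle/EPP/origlogB
    fs_type=$(mount | awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=$fs)
    if [[ $fs_type == nfs* ]]
    then
        : NFS crossmount, handled by the separate nfs unmount branch
    else
        : local filesystem, fall through to the jfs2 unmount logic
    fi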
+epprd_rg:cl_deactivate_fs(1.444)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(1.444)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(1.444)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:35.379104 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:35.379104|INFO: Deactivating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.473)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(1.473)[fs_umount:687] print -- 0 /dev/origlogBlv /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(1.473)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.473)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogA[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogA[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogA[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/origlogA cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.473)[fs_umount:313] FS=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(1.473)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(1.473)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(1.473)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(1.473)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.473)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(1.473)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(1.493)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(1.493)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(1.493)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(1.493)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(1.493)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(1.493)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(1.493)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(1.494)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(1.494)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(1.494)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(1.494)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(1.494)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(1.494)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(1.494)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(1.494)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(1.494)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(1.494)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(1.495)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(1.495)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(1.500)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(1.500)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(1.500)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(1.500)[fs_umount:367] lsfs -c /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(1.503)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogA:/dev/origlogAlv:jfs2:::10485760:rw:no:no' 
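[Editor's note] Each fs_umount call first asks clwparroot whether the resource group lives in a WPAR. The decision rests on a single ODM query, reproduced below exactly as it appears in the loadWparName trace; clodmget is the PowerHA ODM read utility, and an empty answer leaves WPAR_ROOT unset so the script operates on the real mount points.

    wparName=$(clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource)
    [[ -z $wparName ]] && WPAR_ROOT=''       # no WPAR configured for this group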
+epprd_rg:cl_deactivate_fs(1.503)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(1.504)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogA:/dev/origlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(1.505)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(1.505)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(1.506)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(1.507)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(1.507)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(1.507)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(1.508)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(1.509)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(1.509)[fs_umount:394] awk '{ if ( $1 == "/dev/origlogAlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(1.513)[fs_umount:394] FS_MOUNTED=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(1.513)[fs_umount:395] [[ -n /oracle/EPP/origlogA ]] +epprd_rg:cl_deactivate_fs(1.513)[fs_umount:397] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:cl_deactivate_fs(1.513)[fs_umount:409] [[ /oracle/EPP/origlogA == / ]] +epprd_rg:cl_deactivate_fs(1.513)[fs_umount:409] [[ /oracle/EPP/origlogA == /usr ]] +epprd_rg:cl_deactivate_fs(1.513)[fs_umount:409] [[ /oracle/EPP/origlogA == /dev ]] +epprd_rg:cl_deactivate_fs(1.513)[fs_umount:409] [[ /oracle/EPP/origlogA == /proc ]] +epprd_rg:cl_deactivate_fs(1.513)[fs_umount:409] [[ /oracle/EPP/origlogA == /var ]] +epprd_rg:cl_deactivate_fs(1.513)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:35.447806 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:35.447806|INFO: Deactivating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.542)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(1.542)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(1.542)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(1.542)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(1.544)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:05:35.000 +epprd_rg:cl_deactivate_fs(1.544)[fs_umount:434] umount /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(1.613)[fs_umount:437] : Unmount of /oracle/EPP/origlogA worked. Can stop now. 
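[Editor's note] The amlog_trace entries bracketing each unmount land in /var/hacmp/availability/clavailability.log. From the statements visible in the trace, the helper behaves roughly as sketched below; this is a reconstruction, not the shipped function. clcycle (log rotation) and cltime (timestamp) are PowerHA utilities seen in the trace.

    amlog_trace()
    {
        clcycle clavailability.log > /dev/null 2>&1      # rotate the log if needed
        DATE=$(cltime)                                   # ISO-style timestamp
        echo "|$DATE|INFO: $2" >> /var/hacmp/availability/clavailability.log
    }

    # Usage, as seen in the trace (the first argument is passed empty):
    amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/origlogA'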
+epprd_rg:cl_deactivate_fs(1.613)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(1.613)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(1.613)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:35.548315 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:35.548315|INFO: Deactivating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.642)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(1.642)[fs_umount:687] print -- 0 /dev/origlogAlv /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(1.642)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.642)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/oraarch[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs:/oracle/EPP/oraarch[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/oraarch[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/oraarch cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.642)[fs_umount:313] FS=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(1.642)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(1.642)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(1.642)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(1.642)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.642)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(1.642)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(1.662)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(1.663)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(1.663)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(1.663)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(1.663)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(1.663)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(1.663)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(1.663)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(1.663)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(1.663)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(1.663)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(1.663)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(1.663)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(1.663)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(1.663)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(1.663)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(1.663)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(1.664)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(1.664)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(1.669)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(1.669)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(1.669)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(1.669)[fs_umount:367] lsfs -c /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(1.672)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/oraarch:/dev/oraarchlv:jfs2:::104857600:rw:no:no' 
+epprd_rg:cl_deactivate_fs(1.672)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(1.673)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/oraarch:/dev/oraarchlv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(1.674)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(1.675)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(1.675)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(1.676)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(1.676)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(1.676)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(1.678)[fs_umount:394] awk '{ if ( $1 == "/dev/oraarchlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(1.678)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(1.678)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(1.682)[fs_umount:394] FS_MOUNTED=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(1.682)[fs_umount:395] [[ -n /oracle/EPP/oraarch ]] +epprd_rg:cl_deactivate_fs(1.682)[fs_umount:397] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:cl_deactivate_fs(1.682)[fs_umount:409] [[ /oracle/EPP/oraarch == / ]] +epprd_rg:cl_deactivate_fs(1.682)[fs_umount:409] [[ /oracle/EPP/oraarch == /usr ]] +epprd_rg:cl_deactivate_fs(1.682)[fs_umount:409] [[ /oracle/EPP/oraarch == /dev ]] +epprd_rg:cl_deactivate_fs(1.682)[fs_umount:409] [[ /oracle/EPP/oraarch == /proc ]] +epprd_rg:cl_deactivate_fs(1.682)[fs_umount:409] [[ /oracle/EPP/oraarch == /var ]] +epprd_rg:cl_deactivate_fs(1.682)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:35.616796 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:35.616796|INFO: Deactivating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.711)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(1.711)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(1.711)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(1.711)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(1.713)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:05:35.000 +epprd_rg:cl_deactivate_fs(1.713)[fs_umount:434] umount /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(1.782)[fs_umount:437] : Unmount of /oracle/EPP/oraarch worked. Can stop now. 
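[Editor's note] Before treating a filesystem as local, fs_umount checks the AIX mount table for an NFS entry. In AIX mount output a remote mount carries the node name in column 1, so the mounted-over point is $3 and the vfs type is $4; the awk one-liner from the trace keys on that layout. A sketch of the check:

    fs_type=$(mount | awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/oraarch)
    if [[ $fs_type == nfs* ]]
    then
        : NFS unmount branch, never taken anywhere in this trace
    fi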
+epprd_rg:cl_deactivate_fs(1.782)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(1.782)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(1.782)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:35.717205 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:35.717205|INFO: Deactivating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.811)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(1.811)[fs_umount:687] print -- 0 /dev/oraarchlv /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(1.811)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.811)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogB[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogB[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogB[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/mirrlogB cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.811)[fs_umount:313] FS=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(1.811)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(1.811)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(1.811)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(1.811)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.811)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(1.811)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(1.831)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(1.831)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(1.831)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(1.831)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(1.831)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(1.831)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(1.831)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(1.831)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(1.831)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(1.831)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(1.831)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(1.831)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(1.831)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(1.832)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(1.832)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(1.832)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(1.832)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(1.833)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(1.833)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(1.837)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(1.837)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(1.838)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(1.838)[fs_umount:367] lsfs -c /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(1.841)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogB:/dev/mirrlogBlv:jfs2:::10485760:rw:no:no' 
+epprd_rg:cl_deactivate_fs(1.841)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(1.842)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogB:/dev/mirrlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(1.842)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(1.843)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(1.844)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(1.845)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(1.845)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(1.845)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(1.846)[fs_umount:394] awk '{ if ( $1 == "/dev/mirrlogBlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(1.846)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(1.847)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(1.851)[fs_umount:394] FS_MOUNTED=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(1.851)[fs_umount:395] [[ -n /oracle/EPP/mirrlogB ]] +epprd_rg:cl_deactivate_fs(1.851)[fs_umount:397] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:cl_deactivate_fs(1.851)[fs_umount:409] [[ /oracle/EPP/mirrlogB == / ]] +epprd_rg:cl_deactivate_fs(1.851)[fs_umount:409] [[ /oracle/EPP/mirrlogB == /usr ]] +epprd_rg:cl_deactivate_fs(1.851)[fs_umount:409] [[ /oracle/EPP/mirrlogB == /dev ]] +epprd_rg:cl_deactivate_fs(1.851)[fs_umount:409] [[ /oracle/EPP/mirrlogB == /proc ]] +epprd_rg:cl_deactivate_fs(1.851)[fs_umount:409] [[ /oracle/EPP/mirrlogB == /var ]] +epprd_rg:cl_deactivate_fs(1.851)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:35.787664 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:35.787664|INFO: Deactivating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.881)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(1.881)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(1.882)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(1.882)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(1.885)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:05:35.000 +epprd_rg:cl_deactivate_fs(1.885)[fs_umount:434] umount /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(1.953)[fs_umount:437] : Unmount of /oracle/EPP/mirrlogB worked. Can stop now. 
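[Editor's note] The device behind each mount point comes from lsfs -c, which prints a comment header followed by one colon-delimited record. The tail -1 | read idiom in the trace works because ksh runs the last element of a pipeline in the current shell, so the variables set by read survive after the pipeline ends:

    lv_lsfs=$(lsfs -c /oracle/EPP/mirrlogB)
    print -- "$lv_lsfs" | tail -1 | IFS=: read skip lv fs_type rest
    # skip = mount point (discarded), lv = /dev/mirrlogBlv, fs_type = jfs2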
+epprd_rg:cl_deactivate_fs(1.953)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(1.953)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(1.953)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:35.887867 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:35.887867|INFO: Deactivating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(1.982)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(1.982)[fs_umount:687] print -- 0 /dev/mirrlogBlv /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(1.982)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.982)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogA[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogA[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogA[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/mirrlogA cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.982)[fs_umount:313] FS=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(1.982)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(1.982)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(1.982)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(1.982)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(1.982)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(1.982)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(2.002)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(2.004)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(2.004)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(2.008)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(2.008)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(2.008)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(2.008)[fs_umount:367] lsfs -c /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(2.011)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogA:/dev/mirrlogAlv:jfs2:::10485760:rw:no:no' 
+epprd_rg:cl_deactivate_fs(2.011)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(2.012)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogA:/dev/mirrlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(2.013)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(2.014)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(2.014)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(2.015)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(2.015)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(2.015)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(2.017)[fs_umount:394] awk '{ if ( $1 == "/dev/mirrlogAlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(2.017)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(2.017)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(2.021)[fs_umount:394] FS_MOUNTED=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(2.021)[fs_umount:395] [[ -n /oracle/EPP/mirrlogA ]] +epprd_rg:cl_deactivate_fs(2.021)[fs_umount:397] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:cl_deactivate_fs(2.021)[fs_umount:409] [[ /oracle/EPP/mirrlogA == / ]] +epprd_rg:cl_deactivate_fs(2.021)[fs_umount:409] [[ /oracle/EPP/mirrlogA == /usr ]] +epprd_rg:cl_deactivate_fs(2.021)[fs_umount:409] [[ /oracle/EPP/mirrlogA == /dev ]] +epprd_rg:cl_deactivate_fs(2.021)[fs_umount:409] [[ /oracle/EPP/mirrlogA == /proc ]] +epprd_rg:cl_deactivate_fs(2.021)[fs_umount:409] [[ /oracle/EPP/mirrlogA == /var ]] +epprd_rg:cl_deactivate_fs(2.021)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:35.956359 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:35.956359|INFO: Deactivating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(2.050)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(2.050)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(2.050)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(2.050)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(2.053)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:05:35.000 +epprd_rg:cl_deactivate_fs(2.053)[fs_umount:434] umount /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(2.121)[fs_umount:437] : Unmount of /oracle/EPP/mirrlogA worked. Can stop now. 
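[Editor's note] Once the logical volume is known, the script confirms that it is actually mounted, and where, before touching it; the same pass refuses ever to unmount the mount points AIX cannot run without (the comparisons at fs_umount:409 above). A condensed sketch of both checks:

    FS_MOUNTED=$(LC_ALL=C mount | awk '{ if ( $1 == "/dev/mirrlogAlv" ) print $2 }')
    if [[ -n $FS_MOUNTED ]]
    then
        case $FS_MOUNTED in
            / | /usr | /dev | /proc | /var )
                : refuse to unmount a system filesystem ;;
            * )
                : safe to proceed with the unmount ;;
        esac
    fi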
+epprd_rg:cl_deactivate_fs(2.121)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(2.121)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(2.121)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:36.056397 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:36.056397|INFO: Deactivating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(2.150)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(2.150)[fs_umount:687] print -- 0 /dev/mirrlogAlv /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(2.150)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.150)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP +epprd_rg:cl_deactivate_fs:/oracle/EPP[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP[deactivate_fs_process_resources:770] fs_umount /oracle/EPP cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.150)[fs_umount:313] FS=/oracle/EPP +epprd_rg:cl_deactivate_fs(2.150)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(2.150)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(2.150)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(2.150)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.150)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(2.150)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(2.171)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(2.171)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(2.171)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(2.171)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(2.171)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(2.171)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(2.171)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(2.171)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(2.171)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(2.171)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(2.171)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(2.171)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(2.171)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(2.171)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(2.171)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(2.171)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(2.171)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(2.172)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP +epprd_rg:cl_deactivate_fs(2.172)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(2.177)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(2.177)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(2.177)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(2.177)[fs_umount:367] lsfs -c /oracle/EPP +epprd_rg:cl_deactivate_fs(2.180)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP:/dev/epplv:jfs2:::62914560:rw:no:no' +epprd_rg:cl_deactivate_fs(2.180)[fs_umount:382] : 
Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(2.181)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP:/dev/epplv:jfs2:::62914560:rw:no:no' +epprd_rg:cl_deactivate_fs(2.182)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(2.183)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(2.183)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(2.184)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(2.184)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(2.184)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(2.186)[fs_umount:394] awk '{ if ( $1 == "/dev/epplv" ) print $2 }' +epprd_rg:cl_deactivate_fs(2.186)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(2.186)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(2.190)[fs_umount:394] FS_MOUNTED=/oracle/EPP +epprd_rg:cl_deactivate_fs(2.190)[fs_umount:395] [[ -n /oracle/EPP ]] +epprd_rg:cl_deactivate_fs(2.190)[fs_umount:397] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:cl_deactivate_fs(2.190)[fs_umount:409] [[ /oracle/EPP == / ]] +epprd_rg:cl_deactivate_fs(2.190)[fs_umount:409] [[ /oracle/EPP == /usr ]] +epprd_rg:cl_deactivate_fs(2.190)[fs_umount:409] [[ /oracle/EPP == /dev ]] +epprd_rg:cl_deactivate_fs(2.190)[fs_umount:409] [[ /oracle/EPP == /proc ]] +epprd_rg:cl_deactivate_fs(2.190)[fs_umount:409] [[ /oracle/EPP == /var ]] +epprd_rg:cl_deactivate_fs(2.190)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:36.124580 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:36.124580|INFO: Deactivating Filesystem|/oracle/EPP' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(2.218)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP +epprd_rg:cl_deactivate_fs(2.218)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(2.218)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(2.218)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(2.221)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:05:36.000 +epprd_rg:cl_deactivate_fs(2.221)[fs_umount:434] umount /oracle/EPP +epprd_rg:cl_deactivate_fs(2.289)[fs_umount:437] : Unmount of /oracle/EPP worked. Can stop now. 
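[Editor's note] Note the order of the passes: /oracle/EPP/origlogB through /oracle/EPP/mirrlogA are unmounted before /oracle/EPP, which in turn precedes /oracle. Nested filesystems must go deepest-first, or the parent unmount would fail with a busy mount point. For prefix-nested paths like these, a reverse lexical sort yields that order; the list literal below is illustrative, not from the trace:

    for fs in $(print -- '/oracle\n/oracle/EPP\n/oracle/EPP/oraarch' | sort -r)
    do
        : would run fs_umount "$fs" here     # children first, parents last
    done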
+epprd_rg:cl_deactivate_fs(2.289)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(2.289)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(2.289)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:36.223987 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:36.223987|INFO: Deactivating Filesystem|/oracle/EPP' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(2.318)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(2.318)[fs_umount:687] print -- 0 /dev/epplv /oracle/EPP +epprd_rg:cl_deactivate_fs(2.318)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.318)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle[deactivate_fs_process_resources:764] PS4_LOOP=/oracle +epprd_rg:cl_deactivate_fs:/oracle[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle[deactivate_fs_process_resources:770] fs_umount /oracle cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.318)[fs_umount:313] FS=/oracle +epprd_rg:cl_deactivate_fs(2.318)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(2.318)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(2.318)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(2.318)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.318)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(2.318)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(2.338)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(2.338)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(2.338)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(2.338)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(2.338)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(2.338)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(2.338)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(2.338)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(2.338)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(2.338)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(2.338)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(2.338)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(2.338)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(2.338)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(2.338)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(2.338)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(2.338)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(2.340)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle +epprd_rg:cl_deactivate_fs(2.340)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(2.344)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(2.344)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(2.344)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(2.344)[fs_umount:367] lsfs -c /oracle +epprd_rg:cl_deactivate_fs(2.348)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle:/dev/oraclelv:jfs2:::41943040:rw:no:no' +epprd_rg:cl_deactivate_fs(2.348)[fs_umount:382] : Get the 
logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(2.349)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle:/dev/oraclelv:jfs2:::41943040:rw:no:no' +epprd_rg:cl_deactivate_fs(2.350)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(2.350)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(2.350)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(2.352)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(2.352)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(2.352)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(2.353)[fs_umount:394] awk '{ if ( $1 == "/dev/oraclelv" ) print $2 }' +epprd_rg:cl_deactivate_fs(2.353)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(2.354)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(2.358)[fs_umount:394] FS_MOUNTED=/oracle +epprd_rg:cl_deactivate_fs(2.358)[fs_umount:395] [[ -n /oracle ]] +epprd_rg:cl_deactivate_fs(2.358)[fs_umount:397] [[ /oracle != /oracle ]] +epprd_rg:cl_deactivate_fs(2.358)[fs_umount:409] [[ /oracle == / ]] +epprd_rg:cl_deactivate_fs(2.358)[fs_umount:409] [[ /oracle == /usr ]] +epprd_rg:cl_deactivate_fs(2.358)[fs_umount:409] [[ /oracle == /dev ]] +epprd_rg:cl_deactivate_fs(2.358)[fs_umount:409] [[ /oracle == /proc ]] +epprd_rg:cl_deactivate_fs(2.358)[fs_umount:409] [[ /oracle == /var ]] +epprd_rg:cl_deactivate_fs(2.358)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:36.292194 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:36.292194|INFO: Deactivating Filesystem|/oracle' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(2.386)[fs_umount:427] : Try up to 60 times to unmount /oracle +epprd_rg:cl_deactivate_fs(2.386)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(2.386)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(2.386)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(2.389)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:05:36.000 +epprd_rg:cl_deactivate_fs(2.389)[fs_umount:434] umount /oracle +epprd_rg:cl_deactivate_fs(2.458)[fs_umount:437] : Unmount of /oracle worked. Can stop now. 
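[Editor's note] A note on the trace format used throughout: each line is produced by set -x with a customized PS4 showing resource group, script name, elapsed seconds, and function:line. The actual PowerHA prompt string is not shown in this log; the following ksh93 approximation is an assumption, included only for orientation:

    typeset -F3 SECONDS                      # elapsed time with millisecond precision
    PS4='+$GROUPNAME:${0##*/}($SECONDS)[${.sh.fun}:$LINENO] '
    set -x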
+epprd_rg:cl_deactivate_fs(2.458)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(2.458)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(2.458)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:36.393164 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:36.393164|INFO: Deactivating Filesystem|/oracle' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(2.487)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(2.487)[fs_umount:687] print -- 0 /dev/oraclelv /oracle +epprd_rg:cl_deactivate_fs(2.487)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.487)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/board_org[deactivate_fs_process_resources:764] PS4_LOOP=/board_org +epprd_rg:cl_deactivate_fs:/board_org[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/board_org[deactivate_fs_process_resources:770] fs_umount /board_org cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.487)[fs_umount:313] FS=/board_org +epprd_rg:cl_deactivate_fs(2.487)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(2.487)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(2.487)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(2.487)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.487)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(2.487)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(2.507)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(2.507)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(2.507)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(2.507)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(2.507)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(2.507)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(2.507)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(2.507)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(2.507)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(2.507)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(2.507)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(2.507)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(2.507)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(2.507)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(2.507)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(2.507)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(2.507)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(2.509)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/board_org +epprd_rg:cl_deactivate_fs(2.509)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(2.514)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(2.514)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(2.514)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(2.514)[fs_umount:367] lsfs -c /board_org +epprd_rg:cl_deactivate_fs(2.517)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(2.517)[fs_umount:382] : 
Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(2.518)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(2.519)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(2.519)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(2.520)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(2.521)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(2.521)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(2.521)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(2.523)[fs_umount:394] awk '{ if ( $1 == "/dev/boardlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(2.523)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(2.523)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(2.527)[fs_umount:394] FS_MOUNTED=/board_org +epprd_rg:cl_deactivate_fs(2.527)[fs_umount:395] [[ -n /board_org ]] +epprd_rg:cl_deactivate_fs(2.527)[fs_umount:397] [[ /board_org != /board_org ]] +epprd_rg:cl_deactivate_fs(2.527)[fs_umount:409] [[ /board_org == / ]] +epprd_rg:cl_deactivate_fs(2.527)[fs_umount:409] [[ /board_org == /usr ]] +epprd_rg:cl_deactivate_fs(2.527)[fs_umount:409] [[ /board_org == /dev ]] +epprd_rg:cl_deactivate_fs(2.527)[fs_umount:409] [[ /board_org == /proc ]] +epprd_rg:cl_deactivate_fs(2.527)[fs_umount:409] [[ /board_org == /var ]] +epprd_rg:cl_deactivate_fs(2.527)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/board_org' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:36.461254 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:36.461254|INFO: Deactivating Filesystem|/board_org' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(2.555)[fs_umount:427] : Try up to 60 times to unmount /board_org +epprd_rg:cl_deactivate_fs(2.555)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(2.555)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(2.555)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(2.558)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 18:05:36.000 +epprd_rg:cl_deactivate_fs(2.558)[fs_umount:434] umount /board_org +epprd_rg:cl_deactivate_fs(2.627)[fs_umount:437] : Unmount of /board_org worked. Can stop now. 
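[Editor's note] Each completed pass has appended a record of the form '<rc> <device> <mount point>' to /tmp/epprd_rg_deactivate_fs.tmp, and the caller inspects those records just below. A sketch of the two halves of that protocol; the meaning of return code 11 is an assumption, since this excerpt only ever writes 0:

    print -- 0 /dev/boardlv /board_org >> /tmp/epprd_rg_deactivate_fs.tmp   # per-fs record

    # Later, in deactivate_fs_process_resources:
    if grep -qw ^1 /tmp/epprd_rg_deactivate_fs.tmp       # -w keeps "1" from matching "11"
    then
        STATUS=1          # a hard unmount failure occurred
    elif grep -qw ^11 /tmp/epprd_rg_deactivate_fs.tmp
    then
        STATUS=11         # assumed: a distinct recoverable-failure code
    else
        STATUS=0          # all unmounts successful
        rm -f /tmp/epprd_rg_deactivate_fs.tmp
    fi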
+epprd_rg:cl_deactivate_fs(2.627)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(2.627)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(2.627)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/board_org' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T18:05:36.562089 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T18:05:36.562089|INFO: Deactivating Filesystem|/board_org' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(2.656)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(2.656)[fs_umount:687] print -- 0 /dev/boardlv /board_org +epprd_rg:cl_deactivate_fs(2.656)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(2.656)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/board_org[deactivate_fs_process_resources:773] unset PS4_LOOP +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:777] [[ -n '' ]] +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:786] ALLNOERROR=All_non_error_filesystems +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:788] : update resource manager +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:790] cl_RMupdate resource_down All_non_error_filesystems cl_deactivate_fs 2023-01-28T18:05:36.584982 2023-01-28T18:05:36.589391 +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:794] : Check to see how the unmounts went +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:796] [[ -s /tmp/epprd_rg_deactivate_fs.tmp ]] +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:798] grep -qw ^1 /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:805] grep -qw ^11 /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:814] : All unmounts successful +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:816] STATUS=0 +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:817] rm -f /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:821] return 0 +epprd_rg:cl_deactivate_fs[924] exit 0 +epprd_rg:process_resources[process_file_systems:2668] RC=0 +epprd_rg:process_resources[process_file_systems:2669] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[process_file_systems:2681] (( 0 != 0 )) +epprd_rg:process_resources[process_file_systems:2687] return 0 +epprd_rg:process_resources[3483] RC=0 +epprd_rg:process_resources[3485] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3487] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:05:36.611451 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=VGS ACTION=RELEASE VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' EXPORT_FILESYSTEM='"TRUE"' +epprd_rg:process_resources[1] JOB_TYPE=VGS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] VOLUME_GROUPS=datavg 
+epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] EXPORT_FILESYSTEM=TRUE +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ VGS == RELEASE ]] +epprd_rg:process_resources[3360] [[ VGS == ONLINE ]] +epprd_rg:process_resources[3571] process_volume_groups_main RELEASE +epprd_rg:process_resources[process_volume_groups_main:2293] PS4_FUNC=process_volume_groups_main +epprd_rg:process_resources[process_volume_groups_main:2293] typeset PS4_FUNC +epprd_rg:process_resources[process_volume_groups_main:2294] [[ high == high ]] +epprd_rg:process_resources[process_volume_groups_main:2294] set -x +epprd_rg:process_resources[process_volume_groups_main:2295] DEF_VARYON_ACTION=0 +epprd_rg:process_resources[process_volume_groups_main:2295] typeset -li DEF_VARYON_ACTION +epprd_rg:process_resources[process_volume_groups_main:2296] FAILURE_IN_METHOD=0 +epprd_rg:process_resources[process_volume_groups_main:2296] typeset -li FAILURE_IN_METHOD +epprd_rg:process_resources[process_volume_groups_main:2297] ACTION=RELEASE +epprd_rg:process_resources[process_volume_groups_main:2297] typeset ACTION +epprd_rg:process_resources[process_volume_groups_main:2298] STAT=0 +epprd_rg:process_resources[process_volume_groups_main:2299] VG_LIST=datavg +epprd_rg:process_resources[process_volume_groups_main:2300] RG_LIST=epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2304] getReplicatedResources epprd_rg +epprd_rg:process_resources[getReplicatedResources:699] PS4_FUNC=getReplicatedResources +epprd_rg:process_resources[getReplicatedResources:699] typeset PS4_FUNC +epprd_rg:process_resources[getReplicatedResources:700] [[ high == high ]] +epprd_rg:process_resources[getReplicatedResources:700] set -x +epprd_rg:process_resources[getReplicatedResources:702] RV=false +epprd_rg:process_resources[getReplicatedResources:704] clodmget -n -f type HACMPrresmethods +epprd_rg:process_resources[getReplicatedResources:704] [[ -n 9 ]] +epprd_rg:process_resources[getReplicatedResources:707] : Replicated resource methods are defined, check for resources +epprd_rg:process_resources[getReplicatedResources:709] clodmget -q $'name like \'*_REP_RESOURCE\' AND group=epprd_rg' -f value -n HACMPresource +epprd_rg:process_resources[getReplicatedResources:709] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:718] : Verify if any backup profiles are configured and trigger cbm utilities based on that +epprd_rg:process_resources[getReplicatedResources:720] clodmget -q name=BACKUP_ENABLED -f value HACMPresource +epprd_rg:process_resources[getReplicatedResources:720] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:739] echo false +epprd_rg:process_resources[process_volume_groups_main:2304] REPLICATED_RESOURCES=false +epprd_rg:process_resources[process_volume_groups_main:2305] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2306] print -- datavg +epprd_rg:process_resources[process_volume_groups_main:2306] read VOLUME_GROUPS VG_LIST +epprd_rg:process_resources[process_volume_groups_main:2306] IFS=: +epprd_rg:process_resources[process_volume_groups_main:2307] VOLUME_GROUPS=datavg 
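process_resources is a loop driven by clRGPA: each call prints the next job as shell assignments, which the script applies with eval between set -a and set +a so every variable is exported to the helpers it spawns. A sketch of that dispatch loop, with the command substitution, the end-of-work marker, and the dispatcher name assumed for illustration:

    while true
    do
        set -a                             # auto-export everything eval assigns
        eval $(clRGPA)                     # e.g. JOB_TYPE=VGS ACTION=RELEASE VOLUME_GROUPS="datavg"
        RC=$?
        set +a
        (( RC != 0 )) && break             # clRGPA failure ends the loop
        [[ $JOB_TYPE == NONE ]] && break   # assumed end-of-work marker
        process_one_job                    # hypothetical dispatcher on $JOB_TYPE
    done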
+epprd_rg:process_resources[process_volume_groups_main:2310] : At this point, these variables contain information only for epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2312] export VOLUME_GROUPS +epprd_rg:process_resources[process_volume_groups_main:2313] export RESOURCE_GROUPS +epprd_rg:process_resources[process_volume_groups_main:2315] [[ false == true ]] +epprd_rg:process_resources[process_volume_groups_main:2555] process_volume_groups RELEASE +epprd_rg:process_resources[process_volume_groups:2571] PS4_FUNC=process_volume_groups +epprd_rg:process_resources[process_volume_groups:2571] typeset PS4_FUNC +epprd_rg:process_resources[process_volume_groups:2572] [[ high == high ]] +epprd_rg:process_resources[process_volume_groups:2572] set -x +epprd_rg:process_resources[process_volume_groups:2573] STAT=0 +epprd_rg:process_resources[process_volume_groups:2575] GROUPNAME=epprd_rg +epprd_rg:process_resources[process_volume_groups:2575] export GROUPNAME +epprd_rg:process_resources[process_volume_groups:2578] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[process_volume_groups:2603] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[process_volume_groups:2605] cl_deactivate_vgs -n +epprd_rg:cl_deactivate_vgs[458] version=%I% +epprd_rg:cl_deactivate_vgs[461] STATUS=0 +epprd_rg:cl_deactivate_vgs[461] typeset -li STATUS +epprd_rg:cl_deactivate_vgs[462] TMP_VARYOFF_STATUS=/tmp/_deactivate_vgs.tmp +epprd_rg:cl_deactivate_vgs[463] sddsrv_off=FALSE +epprd_rg:cl_deactivate_vgs[464] ALLVGS=All_volume_groups +epprd_rg:cl_deactivate_vgs[465] OEM_CALL=false +epprd_rg:cl_deactivate_vgs[467] (( 1 != 0 )) +epprd_rg:cl_deactivate_vgs[467] [[ -n == -c ]] +epprd_rg:cl_deactivate_vgs[476] EVENT_TYPE=RELEASE_PRIMARY +epprd_rg:cl_deactivate_vgs[477] EVENT_TYPE=RELEASE_PRIMARY +epprd_rg:cl_deactivate_vgs[480] : if JOB_TYPE is set and is not $'\'GROUP\',' then process_resources is parent +epprd_rg:cl_deactivate_vgs[482] [[ VGS != 0 ]] +epprd_rg:cl_deactivate_vgs[482] [[ VGS != GROUP ]] +epprd_rg:cl_deactivate_vgs[485] : parameters passed from process_resources thru environment +epprd_rg:cl_deactivate_vgs[487] PROC_RES=true +epprd_rg:cl_deactivate_vgs[501] : set -u will report an error if any variable used in the script is not set +epprd_rg:cl_deactivate_vgs[503] set -u +epprd_rg:cl_deactivate_vgs[506] : Remove the status file if it currently exists +epprd_rg:cl_deactivate_vgs[508] rm -f /tmp/_deactivate_vgs.tmp +epprd_rg:cl_deactivate_vgs[511] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_deactivate_vgs[512] : to allow reliable comparisons. 
E.g., maximum VRMF is +epprd_rg:cl_deactivate_vgs[513] : 99.99.999.999 +epprd_rg:cl_deactivate_vgs[515] typeset -li V R M F +epprd_rg:cl_deactivate_vgs[516] typeset -Z2 R +epprd_rg:cl_deactivate_vgs[517] typeset -Z3 M +epprd_rg:cl_deactivate_vgs[518] typeset -Z3 F +epprd_rg:cl_deactivate_vgs[519] VRMF=0 +epprd_rg:cl_deactivate_vgs[519] typeset -li VRMF +epprd_rg:cl_deactivate_vgs[528] ls '/dev/vpath*' +epprd_rg:cl_deactivate_vgs[528] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_vgs[595] : Special processing for 2-node NFS clusters +epprd_rg:cl_deactivate_vgs[597] TWO_NODE_CLUSTER=FALSE +epprd_rg:cl_deactivate_vgs[597] export TWO_NODE_CLUSTER +epprd_rg:cl_deactivate_vgs[598] FS_TYPES='jsf2?log' +epprd_rg:cl_deactivate_vgs[598] export FS_TYPES +epprd_rg:cl_deactivate_vgs[599] wc -l +epprd_rg:cl_deactivate_vgs[599] clodmget -q 'object = VERBOSE_LOGGING' -f name -n HACMPnode +epprd_rg:cl_deactivate_vgs[599] (( 2 == 2 )) +epprd_rg:cl_deactivate_vgs[600] [[ -n TRUE ]] +epprd_rg:cl_deactivate_vgs[602] : two nodes, with exported filesystems +epprd_rg:cl_deactivate_vgs[603] TWO_NODE_CLUSTER=TRUE +epprd_rg:cl_deactivate_vgs[603] export TWO_NODE_CLUSTER +epprd_rg:cl_deactivate_vgs[607] : Pick up a list of currently varied on volume groups +epprd_rg:cl_deactivate_vgs[609] lsvg -L -o +epprd_rg:cl_deactivate_vgs[609] 2> /tmp/lsvg.err +epprd_rg:cl_deactivate_vgs[609] VG_ON_LIST=$'datavg\ncaavg_private\nrootvg' +epprd_rg:cl_deactivate_vgs[612] : if not called from process_resources, use old-style environment and parameters +epprd_rg:cl_deactivate_vgs[614] [[ true == false ]] +epprd_rg:cl_deactivate_vgs[672] : Called from process_resources +epprd_rg:cl_deactivate_vgs[674] LIST_OF_VOLUME_GROUPS_FOR_RG='' +epprd_rg:cl_deactivate_vgs[679] export GROUPNAME +epprd_rg:cl_deactivate_vgs[681] : Discover the volume groups for this resource group. +epprd_rg:cl_deactivate_vgs[686] echo datavg +epprd_rg:cl_deactivate_vgs[686] read LIST_OF_VOLUME_GROUPS_FOR_RG VOLUME_GROUPS +epprd_rg:cl_deactivate_vgs[686] IFS=: +epprd_rg:cl_deactivate_vgs[689] : Reverse the order, so that VGs release in reverse order of acquisition +epprd_rg:cl_deactivate_vgs[693] sed 's/ /,/g' +epprd_rg:cl_deactivate_vgs[693] echo datavg +epprd_rg:cl_deactivate_vgs[693] LIST_OF_COMMASEP_VG_FOR_RG=datavg +epprd_rg:cl_deactivate_vgs[694] echo datavg +epprd_rg:cl_deactivate_vgs[695] tr , '\n' +epprd_rg:cl_deactivate_vgs[695] egrep -v -w $'rootvg|caavg_private\n |altinst_rootvg|old_rootvg' +epprd_rg:cl_deactivate_vgs[696] sort -ru +epprd_rg:cl_deactivate_vgs[694] LIST_OF_VOLUME_GROUPS_FOR_RG=datavg +epprd_rg:cl_deactivate_vgs[698] : Update Resource Manager - releasing VGs for this RG +epprd_rg:cl_deactivate_vgs[700] cl_RMupdate resource_releasing All_volume_groups cl_deactivate_vgs 2023-01-28T18:05:36.699608 2023-01-28T18:05:36.704096 +epprd_rg:cl_deactivate_vgs[703] : Process the volume groups for this resource group +epprd_rg:cl_deactivate_vgs:datavg[707] PS4_LOOP=datavg +epprd_rg:cl_deactivate_vgs:datavg[711] print datavg caavg_private rootvg +epprd_rg:cl_deactivate_vgs:datavg[711] grep -qw datavg +epprd_rg:cl_deactivate_vgs:datavg[719] : This VG is varied on, so go vary it off.
Get the VG mode first +epprd_rg:cl_deactivate_vgs:datavg[721] MODE=9999 +epprd_rg:cl_deactivate_vgs:datavg[722] /usr/sbin/getlvodm -v datavg +epprd_rg:cl_deactivate_vgs:datavg[722] VGID=00c44af100004b00000001851e9dc053 +epprd_rg:cl_deactivate_vgs:datavg[723] lqueryvg -g 00c44af100004b00000001851e9dc053 -X +epprd_rg:cl_deactivate_vgs:datavg[723] MODE=32 +epprd_rg:cl_deactivate_vgs:datavg[724] RC=0 +epprd_rg:cl_deactivate_vgs:datavg[725] (( 0 != 0 )) +epprd_rg:cl_deactivate_vgs:datavg[726] : exit status of lqueryvg -g 00c44af100004b00000001851e9dc053 -X: 0 +epprd_rg:cl_deactivate_vgs:datavg[728] vgs_varyoff datavg 32 +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:60] PS4_TIMER=true +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:60] typeset PS4_TIMER +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:61] [[ high == high ]] +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:61] set -x +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:63] VG=datavg +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:63] typeset VG +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:64] MODE=32 +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:64] typeset MODE +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:66] OPEN_FSs='' +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:66] typeset OPEN_FSs +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:67] OPEN_LVs='' +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:67] typeset OPEN_LVs +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:68] typeset TMP_VG_LIST +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:69] TS_FLAGS='' +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:69] typeset TS_FLAGS +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:71] STATUS=0 +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:71] typeset -li STATUS +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:72] RC=0 +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:72] typeset -li RC +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:73] SELECTIVE_FAILOVER=false +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:73] typeset SELECTIVE_FAILOVER +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:74] typeset LV +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:75] lv_list='' +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:75] typeset lv_list +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:76] typeset FS +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:77] FS_MOUNTED='' +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:77] typeset FS_MOUNTED +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:79] rc_fuser=0 +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:79] typeset -li rc_fuser +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:80] rc_varyonvg=0 +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:80] typeset -li rc_varyonvg +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:81] rc_varyoffvg=0 +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:81] typeset -li rc_varyoffvg +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:82] rc_lsvg=0 +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:82] typeset -li rc_lsvg +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:83] rc_dfs=0 +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:83] typeset -li rc_dfs +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:84] rc_dvg=0 +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:84] typeset -li rc_dvg +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:88] typeset -li FV FR FM FF 
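Two small building blocks appear in this stretch: the release list is rebuilt in reverse acquisition order with system volume groups filtered out, and each surviving VG's concurrency mode is read via the ODM and the VGDA (32 meaning enhanced concurrent). Standalone sketches of both, using names from the trace; the traced egrep pattern carries a stray embedded newline, but its intent is the four system VGs shown here:

    # Release order is the reverse of acquisition order; system VGs are skipped.
    print datavg |
        tr ',' '\n' |
        egrep -v -w 'rootvg|caavg_private|altinst_rootvg|old_rootvg' |
        sort -ru
    # Concurrency mode: VG name -> VGID (ODM), then VGID -> mode (VGDA).
    VGID=$(/usr/sbin/getlvodm -v datavg)
    MODE=$(lqueryvg -g $VGID -X)      # 32 = enhanced concurrent mode (ECM)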
+epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:89] typeset -Z2 FR +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:90] typeset -Z3 FM +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:91] typeset -Z3 FF +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:92] FVRMF=0 +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:92] typeset -li FVRMF +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:93] fuser_lvl=601004000 +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:93] typeset -li fuser_lvl +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:95] lsvg -l -L datavg +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:95] 2> /dev/null +epprd_rg:cl_deactivate_vgs(0.116):datavg[vgs_varyoff:95] TMP_VG_LIST=$'datavg:\nLV NAME TYPE LPs PPs PVs LV STATE MOUNT POINT\nepprdaloglv jfs2log 1 1 1 closed/syncd N/A\nsaplv jfs2 100 100 7 closed/syncd /usr/sap\nsapmntlv jfs2 20 20 7 closed/syncd /sapmnt\noraclelv jfs2 40 40 7 closed/syncd /oracle\nepplv jfs2 60 60 7 closed/syncd /oracle/EPP\noraarchlv jfs2 100 100 7 closed/syncd /oracle/EPP/oraarch\nsapdata1lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata1\nsapdata2lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata2\nsapdata3lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata3\nsapdata4lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata4\nboardlv jfs2 10 10 7 closed/syncd /board_org\noriglogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogA\noriglogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogB\nmirrlogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogA\nmirrlogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_vgs(0.116):datavg[vgs_varyoff:96] rc_lsvg=0 +epprd_rg:cl_deactivate_vgs(0.116):datavg[vgs_varyoff:98] [[ RELEASE_PRIMARY == reconfig* ]] +epprd_rg:cl_deactivate_vgs(0.116):datavg[vgs_varyoff:114] [[ -n $'datavg:\nLV NAME TYPE LPs PPs PVs LV STATE MOUNT POINT\nepprdaloglv jfs2log 1 1 1 closed/syncd N/A\nsaplv jfs2 100 100 7 closed/syncd /usr/sap\nsapmntlv jfs2 20 20 7 closed/syncd /sapmnt\noraclelv jfs2 40 40 7 closed/syncd /oracle\nepplv jfs2 60 60 7 closed/syncd /oracle/EPP\noraarchlv jfs2 100 100 7 closed/syncd /oracle/EPP/oraarch\nsapdata1lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata1\nsapdata2lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata2\nsapdata3lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata3\nsapdata4lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata4\nboardlv jfs2 10 10 7 closed/syncd /board_org\noriglogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogA\noriglogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogB\nmirrlogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogA\nmirrlogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogB' ]] +epprd_rg:cl_deactivate_vgs(0.116):datavg[vgs_varyoff:117] : Get list of open logical volumes corresponding to filesystems +epprd_rg:cl_deactivate_vgs(0.118):datavg[vgs_varyoff:119] awk '$2 ~ /jfs2?$/ && $6 ~ /open/ {print $1}' +epprd_rg:cl_deactivate_vgs(0.118):datavg[vgs_varyoff:119] print $'datavg:\nLV NAME TYPE LPs PPs PVs LV STATE MOUNT POINT\nepprdaloglv jfs2log 1 1 1 closed/syncd N/A\nsaplv jfs2 100 100 7 closed/syncd /usr/sap\nsapmntlv jfs2 20 20 7 closed/syncd /sapmnt\noraclelv jfs2 40 40 7 closed/syncd /oracle\nepplv jfs2 60 60 7 closed/syncd /oracle/EPP\noraarchlv jfs2 100 100 7 closed/syncd /oracle/EPP/oraarch\nsapdata1lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata1\nsapdata2lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata2\nsapdata3lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata3\nsapdata4lv jfs2 100 100 7 closed/syncd 
/oracle/EPP/sapdata4\nboardlv jfs2 10 10 7 closed/syncd /board_org\noriglogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogA\noriglogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogB\nmirrlogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogA\nmirrlogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_vgs(0.122):datavg[vgs_varyoff:119] OPEN_LVs='' +epprd_rg:cl_deactivate_vgs(0.122):datavg[vgs_varyoff:122] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_vgs(0.122):datavg[vgs_varyoff:140] [[ -n '' ]] +epprd_rg:cl_deactivate_vgs(0.122):datavg[vgs_varyoff:167] [[ TRUE == TRUE ]] +epprd_rg:cl_deactivate_vgs(0.122):datavg[vgs_varyoff:170] : For two-node clusters, special processing for the highly available NFS +epprd_rg:cl_deactivate_vgs(0.122):datavg[vgs_varyoff:171] : server function: tell NFS to dump the dup cache into the jfslog or jfs2log +epprd_rg:cl_deactivate_vgs(0.122):datavg[vgs_varyoff:175] : Find the first log device in the saved list of logical volumes +epprd_rg:cl_deactivate_vgs(0.122):datavg[vgs_varyoff:177] pattern='jsf2?log' +epprd_rg:cl_deactivate_vgs(0.123):datavg[vgs_varyoff:178] awk '$2 ~ /jsf2?log/ {printf "/dev/%s\n", $1 ; exit}' +epprd_rg:cl_deactivate_vgs(0.123):datavg[vgs_varyoff:178] print $'datavg:\nLV NAME TYPE LPs PPs PVs LV STATE MOUNT POINT\nepprdaloglv jfs2log 1 1 1 closed/syncd N/A\nsaplv jfs2 100 100 7 closed/syncd /usr/sap\nsapmntlv jfs2 20 20 7 closed/syncd /sapmnt\noraclelv jfs2 40 40 7 closed/syncd /oracle\nepplv jfs2 60 60 7 closed/syncd /oracle/EPP\noraarchlv jfs2 100 100 7 closed/syncd /oracle/EPP/oraarch\nsapdata1lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata1\nsapdata2lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata2\nsapdata3lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata3\nsapdata4lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata4\nboardlv jfs2 10 10 7 closed/syncd /board_org\noriglogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogA\noriglogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogB\nmirrlogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogA\nmirrlogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_vgs(0.127):datavg[vgs_varyoff:178] logdev='' +epprd_rg:cl_deactivate_vgs(0.127):datavg[vgs_varyoff:180] [[ -z '' ]] +epprd_rg:cl_deactivate_vgs(0.127):datavg[vgs_varyoff:181] [[ true == true ]] +epprd_rg:cl_deactivate_vgs(0.127):datavg[vgs_varyoff:182] [[ ONLINE != ONLINE ]] +epprd_rg:cl_deactivate_vgs(0.127):datavg[vgs_varyoff:216] [[ -n '' ]] +epprd_rg:cl_deactivate_vgs(0.127):datavg[vgs_varyoff:223] : Finally, vary off the volume group +epprd_rg:cl_deactivate_vgs(0.127):datavg[vgs_varyoff:226] amlog_trace '' 'Deactivating Volume Group|datavg' +epprd_rg:cl_deactivate_vgs(0.127):datavg[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_vgs(0.128):datavg[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_vgs(0.152):datavg[amlog_trace:319] cltime +epprd_rg:cl_deactivate_vgs(0.155):datavg[amlog_trace:319] DATE=2023-01-28T18:05:36.792986 +epprd_rg:cl_deactivate_vgs(0.155):datavg[amlog_trace:320] echo '|2023-01-28T18:05:36.792986|INFO: Deactivating Volume Group|datavg' +epprd_rg:cl_deactivate_vgs(0.155):datavg[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_vgs(0.155):datavg[vgs_varyoff:228] [[ 32 == 32 ]] +epprd_rg:cl_deactivate_vgs(0.155):datavg[vgs_varyoff:231] : This VG is ECM. Move to passive mode. 
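Both awk passes over the lsvg -l listing come back empty here, but for different reasons: no filesystem LV is still open (everything was unmounted above), while the log-device search uses the pattern 'jsf2?log', which looks like a transposition of 'jfs2?log' and so can never match epprdaloglv's type of jfs2log; the empty result is tolerated and the NFS dup-cache flush is simply skipped. The two filters as standalone commands, the second spelled as presumably intended:

    # LVs whose filesystem type is jfs/jfs2 and whose state is open/...
    lsvg -l -L datavg 2>/dev/null | awk '$2 ~ /jfs2?$/ && $6 ~ /open/ {print $1}'
    # first jfs/jfs2 log device in the group, printed as /dev/<lv>
    lsvg -l -L datavg 2>/dev/null | awk '$2 ~ /jfs2?log/ {printf "/dev/%s\n", $1; exit}'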
+epprd_rg:cl_deactivate_vgs(0.155):datavg[vgs_varyoff:244] TS_FLAGS=-o +epprd_rg:cl_deactivate_vgs(0.155):datavg[vgs_varyoff:245] cltime 2023-01-28T18:05:36.795666 +epprd_rg:cl_deactivate_vgs(0.158):datavg[vgs_varyoff:246] varyonvg -c -n -P datavg +epprd_rg:cl_deactivate_vgs(0.158):datavg[vgs_varyoff:246] 2> /dev/null +epprd_rg:cl_deactivate_vgs(0.294):datavg[vgs_varyoff:247] rc_varyonvg=0 +epprd_rg:cl_deactivate_vgs(0.294):datavg[vgs_varyoff:248] : return code from varyonvg -c -n -P datavg is 0 +epprd_rg:cl_deactivate_vgs(0.294):datavg[vgs_varyoff:249] cltime 2023-01-28T18:05:36.934178 +epprd_rg:cl_deactivate_vgs(0.296):datavg[vgs_varyoff:250] (( 0 != 0 )) +epprd_rg:cl_deactivate_vgs(0.296):datavg[vgs_varyoff:277] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_vgs(0.296):datavg[vgs_varyoff:281] amlog_trace '' 'Deactivating Volume Group|datavg' +epprd_rg:cl_deactivate_vgs(0.296):datavg[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_vgs(0.297):datavg[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_vgs(0.321):datavg[amlog_trace:319] cltime +epprd_rg:cl_deactivate_vgs(0.324):datavg[amlog_trace:319] DATE=2023-01-28T18:05:36.961924 +epprd_rg:cl_deactivate_vgs(0.324):datavg[amlog_trace:320] echo '|2023-01-28T18:05:36.961924|INFO: Deactivating Volume Group|datavg' +epprd_rg:cl_deactivate_vgs(0.324):datavg[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_vgs(0.324):datavg[vgs_varyoff:284] RC=0 +epprd_rg:cl_deactivate_vgs(0.324):datavg[vgs_varyoff:287] : Update LVM volume group timestamps in ODM +epprd_rg:cl_deactivate_vgs(0.324):datavg[vgs_varyoff:289] cl_update_vg_odm_ts -o datavg +epprd_rg:cl_update_vg_odm_ts(0.001)[77] version=1.13 +epprd_rg:cl_update_vg_odm_ts(0.001)[121] o_flag='' +epprd_rg:cl_update_vg_odm_ts(0.001)[122] f_flag='' +epprd_rg:cl_update_vg_odm_ts(0.001)[123] getopts :of option +epprd_rg:cl_update_vg_odm_ts(0.001)[126] : Local timestamps should be good, since volume group was +epprd_rg:cl_update_vg_odm_ts(0.001)[127] : just varyied on or off +epprd_rg:cl_update_vg_odm_ts(0.001)[128] o_flag=TRUE +epprd_rg:cl_update_vg_odm_ts(0.001)[123] getopts :of option +epprd_rg:cl_update_vg_odm_ts(0.001)[142] shift 1 +epprd_rg:cl_update_vg_odm_ts(0.001)[144] vg_name=datavg +epprd_rg:cl_update_vg_odm_ts(0.001)[145] [[ -z datavg ]] +epprd_rg:cl_update_vg_odm_ts(0.001)[151] shift +epprd_rg:cl_update_vg_odm_ts(0.001)[152] node_list='' +epprd_rg:cl_update_vg_odm_ts(0.001)[153] /usr/es/sbin/cluster/utilities/cl_get_path all +epprd_rg:cl_update_vg_odm_ts(0.004)[153] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin +epprd_rg:cl_update_vg_odm_ts(0.004)[155] [[ -z '' ]] +epprd_rg:cl_update_vg_odm_ts(0.004)[158] : Check to see if this update is necessary - some LVM levels automatically +epprd_rg:cl_update_vg_odm_ts(0.004)[159] : update volume group timestamps clusterwide. 
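The varyonvg call above is the heart of the release for an enhanced concurrent VG: rather than a plain varyoffvg, the group is demoted to passive concurrent mode, which keeps it known to this node (read-only) while the peer takes it active. The operative command as traced, with the flag meanings annotated:

    varyonvg -c -n -P datavg 2>/dev/null   # -c concurrent, -n no stale-partition sync, -P passive mode
    rc_varyonvg=$?
    : return code from varyonvg -c -n -P datavg is $rc_varyonvg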
+epprd_rg:cl_update_vg_odm_ts(0.004)[163] instfix -iqk IV74100 +epprd_rg:cl_update_vg_odm_ts(0.005)[163] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.012)[164] instfix -iqk IV74883 +epprd_rg:cl_update_vg_odm_ts(0.013)[164] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.020)[165] instfix -iqk IV74698 +epprd_rg:cl_update_vg_odm_ts(0.020)[165] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.028)[166] instfix -iqk IV74246 +epprd_rg:cl_update_vg_odm_ts(0.028)[166] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.035)[174] emgr -l -L IV74883 +epprd_rg:cl_update_vg_odm_ts(0.036)[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.287)[174] emgr -l -L IV74698 +epprd_rg:cl_update_vg_odm_ts(0.287)[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.538)[174] emgr -l -L IV74246 +epprd_rg:cl_update_vg_odm_ts(0.538)[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.788)[183] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_update_vg_odm_ts(0.788)[184] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_update_vg_odm_ts(0.788)[185] : 99.99.999.999 +epprd_rg:cl_update_vg_odm_ts(0.788)[187] typeset -li V R M F +epprd_rg:cl_update_vg_odm_ts(0.788)[188] typeset -Z2 V +epprd_rg:cl_update_vg_odm_ts(0.788)[189] typeset -Z2 R +epprd_rg:cl_update_vg_odm_ts(0.788)[190] typeset -Z3 M +epprd_rg:cl_update_vg_odm_ts(0.788)[191] typeset -Z3 F +epprd_rg:cl_update_vg_odm_ts(0.788)[192] lvm_lvl6=601008015 +epprd_rg:cl_update_vg_odm_ts(0.788)[192] typeset -li lvm_lvl6 +epprd_rg:cl_update_vg_odm_ts(0.788)[194] lvm_lvl7=701003046 +epprd_rg:cl_update_vg_odm_ts(0.788)[194] typeset -li lvm_lvl7 +epprd_rg:cl_update_vg_odm_ts(0.788)[195] VRMF=0 +epprd_rg:cl_update_vg_odm_ts(0.788)[195] typeset -li VRMF +epprd_rg:cl_update_vg_odm_ts(0.788)[198] : Here try and figure out what level of LVM is installed +epprd_rg:cl_update_vg_odm_ts(0.789)[200] lslpp -lcqOr bos.rte.lvm +epprd_rg:cl_update_vg_odm_ts(0.790)[200] cut -f3 -d: +epprd_rg:cl_update_vg_odm_ts(0.791)[200] read V R M F +epprd_rg:cl_update_vg_odm_ts(0.791)[200] IFS=. +epprd_rg:cl_update_vg_odm_ts(0.791)[201] VRMF=0702005101 +epprd_rg:cl_update_vg_odm_ts(0.791)[203] (( 7 == 6 && 702005101 >= 601008015 )) +epprd_rg:cl_update_vg_odm_ts(0.791)[204] (( 702005101 >= 701003046 )) +epprd_rg:cl_update_vg_odm_ts(0.791)[207] : LVM at a level in which timestamp update is unnecessary +epprd_rg:cl_update_vg_odm_ts(0.791)[209] return 0 +epprd_rg:cl_deactivate_vgs(1.120):datavg[vgs_varyoff:291] (( 0 == 0 )) +epprd_rg:cl_deactivate_vgs(1.120):datavg[vgs_varyoff:294] : successful varyoff, set the fence height to read-only +epprd_rg:cl_deactivate_vgs(1.120):datavg[vgs_varyoff:297] cl_set_vg_fence_height -c datavg ro cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=ro(2)) +epprd_rg:cl_deactivate_vgs(1.123):datavg[vgs_varyoff:298] RC=0 +epprd_rg:cl_deactivate_vgs(1.123):datavg[vgs_varyoff:299] (( 0 != 0 )) +epprd_rg:cl_deactivate_vgs(1.123):datavg[vgs_varyoff:403] : Append status to the status file. 
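cl_update_vg_odm_ts bails out early here because this LVM level (7.2.5.101) already propagates VG timestamps clusterwide; the test works by zero-padding each version field so the whole VRMF collapses into a single comparable integer, after which the only remaining cleanup is setting the fence height back to read-only. The padding trick in isolation:

    typeset -Z2 V R                   # zero-fill version/release to two digits
    typeset -Z3 M F                   # zero-fill mod/fix to three digits
    typeset -li VRMF
    lslpp -lcqOr bos.rte.lvm | cut -f3 -d: | IFS=. read V R M F
    VRMF=$V$R$M$F                     # e.g. 7.2.5.101 -> 0702005101
    if (( VRMF >= 701003046 ))        # bos.rte.lvm 7.1.3.46 or newer
    then
        : timestamp update unnecessary at this level
    fi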
+epprd_rg:cl_deactivate_vgs(1.123):datavg[vgs_varyoff:407] echo datavg 0 +epprd_rg:cl_deactivate_vgs(1.123):datavg[vgs_varyoff:407] 1>> /tmp/_deactivate_vgs.tmp +epprd_rg:cl_deactivate_vgs(1.124):datavg[vgs_varyoff:408] return 0 +epprd_rg:cl_deactivate_vgs(1.124):datavg[731] unset PS4_LOOP +epprd_rg:cl_deactivate_vgs(1.124)[736] : Wait for the background instances of vgs_varyoff +epprd_rg:cl_deactivate_vgs(1.124)[738] wait +epprd_rg:cl_deactivate_vgs(1.124)[741] : Collect any failure indications from backgrounded varyoff processing +epprd_rg:cl_deactivate_vgs(1.124)[743] [[ -f /tmp/_deactivate_vgs.tmp ]] +epprd_rg:cl_deactivate_vgs(1.125)[748] cat /tmp/_deactivate_vgs.tmp +epprd_rg:cl_deactivate_vgs(1.125)[748] read VGNAME VARYOFF_STATUS +epprd_rg:cl_deactivate_vgs(1.126)[750] [[ 0 == 1 ]] +epprd_rg:cl_deactivate_vgs(1.126)[748] read VGNAME VARYOFF_STATUS +epprd_rg:cl_deactivate_vgs(1.126)[765] rm -f /tmp/_deactivate_vgs.tmp +epprd_rg:cl_deactivate_vgs(1.129)[769] : Update Resource Manager - release success for the non-error VGs +epprd_rg:cl_deactivate_vgs(1.129)[771] ALLNOERRVGS=All_nonerror_volume_groups +epprd_rg:cl_deactivate_vgs(1.129)[772] [[ true == false ]] +epprd_rg:cl_deactivate_vgs(1.129)[778] cl_RMupdate resource_down All_nonerror_volume_groups cl_deactivate_vgs 2023-01-28T18:05:37.789453 2023-01-28T18:05:37.793922 +epprd_rg:cl_deactivate_vgs(1.156)[782] [[ FALSE == TRUE ]] +epprd_rg:cl_deactivate_vgs(1.156)[791] exit 0 +epprd_rg:process_resources[process_volume_groups:2606] RC=0 +epprd_rg:process_resources[process_volume_groups:2607] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[process_volume_groups:2620] (( 0 != 0 )) +epprd_rg:process_resources[process_volume_groups:2627] return 0 +epprd_rg:process_resources[process_volume_groups_main:2556] STAT=0 +epprd_rg:process_resources[process_volume_groups_main:2559] return 0 +epprd_rg:process_resources[3572] RC=0 +epprd_rg:process_resources[3573] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3575] [[ 0 != 0 ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:05:37.807350 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=SERVICE_LABELS ACTION=RELEASE IP_LABELS='"epprd"' RESOURCE_GROUPS='"epprd_rg' '"' COMMUNICATION_LINKS='""' +epprd_rg:process_resources[1] JOB_TYPE=SERVICE_LABELS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] IP_LABELS=epprd +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] COMMUNICATION_LINKS='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ SERVICE_LABELS == RELEASE ]] +epprd_rg:process_resources[3360] [[ SERVICE_LABELS == ONLINE ]] +epprd_rg:process_resources[3407] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[3411] release_service_labels 
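As with the filesystems, each vgs_varyoff appends a status line to a shared temp file; the parent waits for any backgrounded instances, reads the file back, and fails the event if any VG reported status 1 (the filesystem pass uses the same trick, with grep -qw keeping a leading 1 distinct from a leading 11). In sketch form:

    typeset -i STATUS=0
    wait                                    # let background vgs_varyoff jobs finish
    if [[ -f /tmp/_deactivate_vgs.tmp ]]
    then
        cat /tmp/_deactivate_vgs.tmp | while read VGNAME VARYOFF_STATUS
        do
            [[ $VARYOFF_STATUS == 1 ]] && STATUS=1   # any failed varyoff fails the event
        done
        rm -f /tmp/_deactivate_vgs.tmp
    fi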
+epprd_rg:process_resources[release_service_labels:3125] PS4_FUNC=release_service_labels +epprd_rg:process_resources[release_service_labels:3125] typeset PS4_FUNC +epprd_rg:process_resources[release_service_labels:3126] [[ high == high ]] +epprd_rg:process_resources[release_service_labels:3126] set -x +epprd_rg:process_resources[release_service_labels:3127] STAT=0 +epprd_rg:process_resources[release_service_labels:3128] clcallev release_service_addr Jan 28 2023 18:05:37 EVENT START: release_service_addr |2023-01-28T18:05:37|1583|EVENT START: release_service_addr | +epprd_rg:release_service_addr[87] version=1.44 +epprd_rg:release_service_addr[90] STATUS=0 +epprd_rg:release_service_addr[91] PROC_RES=false +epprd_rg:release_service_addr[95] [[ SERVICE_LABELS != 0 ]] +epprd_rg:release_service_addr[95] [[ SERVICE_LABELS != GROUP ]] +epprd_rg:release_service_addr[96] PROC_RES=true +epprd_rg:release_service_addr[97] _IP_LABELS=epprd +epprd_rg:release_service_addr[109] saveNSORDER=UNDEFINED +epprd_rg:release_service_addr[110] NSORDER=local +epprd_rg:release_service_addr[110] export NSORDER +epprd_rg:release_service_addr[117] export GROUPNAME +epprd_rg:release_service_addr[119] [[ true == true ]] +epprd_rg:release_service_addr[120] get_list_head epprd +epprd_rg:release_service_addr[120] read SERVICELABELS +epprd_rg:release_service_addr[121] get_list_tail epprd +epprd_rg:release_service_addr[121] read IP_LABELS +epprd_rg:release_service_addr[127] cl_RMupdate resource_releasing All_service_addrs release_service_addr 2023-01-28T18:05:37.891634 2023-01-28T18:05:37.896031 +epprd_rg:release_service_addr[136] clgetif -a epprd +epprd_rg:release_service_addr[136] LC_ALL=C en0 +epprd_rg:release_service_addr[137] return_code=0 +epprd_rg:release_service_addr[137] typeset -li return_code +epprd_rg:release_service_addr[138] (( 0 )) +epprd_rg:release_service_addr[159] cllsif -J '~' -Sn epprd +epprd_rg:release_service_addr[159] cut -d~ -f7 +epprd_rg:release_service_addr[159] uniq +epprd_rg:release_service_addr[159] textual_addr=61.81.244.156 +epprd_rg:release_service_addr[160] clgetif -a 61.81.244.156 +epprd_rg:release_service_addr[160] LC_ALL=C +epprd_rg:release_service_addr[160] INTERFACE='en0 ' +epprd_rg:release_service_addr[161] [[ -z 'en0 ' ]] +epprd_rg:release_service_addr[182] clgetif -n 61.81.244.156 +epprd_rg:release_service_addr[182] LC_ALL=C +epprd_rg:release_service_addr[182] NETMASK='255.255.255.0 ' +epprd_rg:release_service_addr[183] cllsif -J '~' +epprd_rg:release_service_addr[183] grep -wF 61.81.244.156 +epprd_rg:release_service_addr[184] cut -d~ -f3 +epprd_rg:release_service_addr[184] sort -u +epprd_rg:release_service_addr[183] NETWORK=net_ether_01 +epprd_rg:release_service_addr[189] cllsif -J '~' -Si epprda +epprd_rg:release_service_addr[189] grep '~boot~' +epprd_rg:release_service_addr[190] cut -d~ -f3,7 +epprd_rg:release_service_addr[190] grep ^net_ether_01~ +epprd_rg:release_service_addr[191] cut -d~ -f2 +epprd_rg:release_service_addr[191] tail -1 +epprd_rg:release_service_addr[189] BOOT=61.81.244.134 +epprd_rg:release_service_addr[193] [[ -z 61.81.244.134 ]] +epprd_rg:release_service_addr[214] [[ -n 'en0 ' ]] +epprd_rg:release_service_addr[216] cut -f15 -d~ +epprd_rg:release_service_addr[216] cllsif -J '~' -Sn 61.81.244.156 +epprd_rg:release_service_addr[216] [[ AF_INET == AF_INET6 ]] +epprd_rg:release_service_addr[221] cl_swap_IP_address rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[462] version=1.9.14.8 +epprd_rg:cl_swap_IP_address[464] 
cl_get_path -S +epprd_rg:cl_swap_IP_address[464] OP_SEP='~' +epprd_rg:cl_swap_IP_address[465] LC_ALL=C +epprd_rg:cl_swap_IP_address[465] export LC_ALL +epprd_rg:cl_swap_IP_address[466] RESTORE_ROUTES=/usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[468] cl_echo 33 'Starting execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0' /usr/es/sbin/cluster/events/utils/cl_swap_IP_address 'rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0' Jan 28 2023 18:05:37Starting execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0+epprd_rg:cl_swap_IP_address[470] typeset -i oslevel +epprd_rg:cl_swap_IP_address[471] /usr/bin/sed s/-//g +epprd_rg:cl_swap_IP_address[471] /usr/bin/oslevel -r +epprd_rg:cl_swap_IP_address[471] oslevel=720005 +epprd_rg:cl_swap_IP_address[476] [[ 6 == 6 ]] +epprd_rg:cl_swap_IP_address[477] [[ 6 == 7 ]] +epprd_rg:cl_swap_IP_address[484] no -a +epprd_rg:cl_swap_IP_address[484] grep ipignoreredirects +epprd_rg:cl_swap_IP_address[484] awk '{ print $3 }' +epprd_rg:cl_swap_IP_address[484] PRIOR_IPIGNORE_REDIRECTS_VALUE=0 +epprd_rg:cl_swap_IP_address[485] /usr/sbin/no -o ipignoreredirects=1 Setting ipignoreredirects to 1 +epprd_rg:cl_swap_IP_address[490] PROC_RES=false +epprd_rg:cl_swap_IP_address[491] [[ SERVICE_LABELS != 0 ]] +epprd_rg:cl_swap_IP_address[491] [[ SERVICE_LABELS != GROUP ]] +epprd_rg:cl_swap_IP_address[492] PROC_RES=true +epprd_rg:cl_swap_IP_address[495] set -u +epprd_rg:cl_swap_IP_address[497] RC=0 +epprd_rg:cl_swap_IP_address[504] netstat -in Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll en0 1500 link#2 fa.e6.13.4e.a9.20 183746055 0 60760193 0 0 en0 1500 61.81.244 61.81.244.156 183746055 0 60760193 0 0 en0 1500 61.81.244 61.81.244.134 183746055 0 60760193 0 0 lo0 16896 link#1 34272009 0 34272009 0 0 lo0 16896 127 127.0.0.1 34272009 0 34272009 0 0 lo0 16896 ::1%1 34272009 0 34272009 0 0 +epprd_rg:cl_swap_IP_address[505] netstat -rnC Routing tables Destination Gateway Flags Wt Policy If Cost Config_Cost Route tree for Protocol Family 2 (Internet): default 61.81.244.1 UG 1 - en0 0 0 61.81.244.0 61.81.244.156 UHSb 1 - en0 0 0 => 61.81.244/24 61.81.244.156 U 1 - en0 0 0 61.81.244.134 127.0.0.1 UGHS 1 - lo0 0 0 61.81.244.156 127.0.0.1 UGHS 1 - lo0 0 0 61.81.244.255 61.81.244.156 UHSb 1 - en0 0 0 127/8 127.0.0.1 U 1 - lo0 0 0 Route tree for Protocol Family 24 (Internet v6): ::1%1 ::1%1 UH 1 - lo0 0 0 +epprd_rg:cl_swap_IP_address[506] CASC_OR_ROT=rotating +epprd_rg:cl_swap_IP_address[507] ACQ_OR_RLSE=release +epprd_rg:cl_swap_IP_address[508] IF=en0 +epprd_rg:cl_swap_IP_address[509] ADDR=61.81.244.134 +epprd_rg:cl_swap_IP_address[510] OLD_ADDR=61.81.244.156 +epprd_rg:cl_swap_IP_address[511] NETMASK=255.255.255.0 +epprd_rg:cl_swap_IP_address[514] [[ rotating == cascading ]] +epprd_rg:cl_swap_IP_address[525] cut -f3 -d~ +epprd_rg:cl_swap_IP_address[525] cllsif -J '~' -Sw -n 61.81.244.134 +epprd_rg:cl_swap_IP_address[525] NET=net_ether_01 +epprd_rg:cl_swap_IP_address[528] clodmget -qidentifier=61.81.244.134 -f max_aliases -n HACMPadapter +epprd_rg:cl_swap_IP_address[528] ALIAS_FIRST=0 +epprd_rg:cl_swap_IP_address[529] grep -c -w inet +epprd_rg:cl_swap_IP_address[529] ifconfig en0 +epprd_rg:cl_swap_IP_address[529] LC_ALL=C +epprd_rg:cl_swap_IP_address[529] NUM_ADDRS=2 +epprd_rg:cl_swap_IP_address[530] [[ release == acquire ]] +epprd_rg:cl_swap_IP_address[598] cl_echo 
7320 'cl_swap_IP_address: Removing aliased IP address 61.81.244.156 from adapter en0' cl_swap_IP_address 61.81.244.156 en0 Jan 28 2023 18:05:38cl_swap_IP_address: Removing aliased IP address 61.81.244.156 from adapter en0+epprd_rg:cl_swap_IP_address[600] amlog_trace '' 'Deliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_swap_IP_address[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_swap_IP_address[amlog_trace:319] cltime +epprd_rg:cl_swap_IP_address[amlog_trace:319] DATE=2023-01-28T18:05:38.137759 +epprd_rg:cl_swap_IP_address[amlog_trace:320] echo '|2023-01-28T18:05:38.137759|INFO: Deliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_swap_IP_address[601] PERSISTENT='' +epprd_rg:cl_swap_IP_address[602] ADDR1=61.81.244.156 +epprd_rg:cl_swap_IP_address[603] disable_pmtu_gated Setting tcp_pmtu_discover to 0 Setting udp_pmtu_discover to 0 +epprd_rg:cl_swap_IP_address[604] alias_replace_routes /usr/es/sbin/cluster/.restore_routes en0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:168] RR=/usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[alias_replace_routes:169] shift +epprd_rg:cl_swap_IP_address[alias_replace_routes:170] interfaces=en0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:171] RC=0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:173] cp /dev/null /usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[alias_replace_routes:175] cat +epprd_rg:cl_swap_IP_address[alias_replace_routes:175] 1> /usr/es/sbin/cluster/.restore_routes 0<< \EOF +epprd_rg:cl_swap_IP_address[alias_replace_routes:175] date #!/bin/ksh # # Script created by cl_swap_IP_address on Sat Jan 28 18:05:38 KORST 2023 # PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin PS4='${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] ' export VERBOSE_LOGGING=${VERBOSE_LOGGING:-"high"} [[ "$VERBOSE_LOGGING" = "high" ]] && set -x : Starting $0 at $(date) # EOF +epprd_rg:cl_swap_IP_address[alias_replace_routes:189] awk '$3 !~ "[Ll]ink" && $3 !~ ":" && $3 !~ "Network" {print $4}' +epprd_rg:cl_swap_IP_address[alias_replace_routes:189] netstat -in +epprd_rg:cl_swap_IP_address[alias_replace_routes:189] LOCADDRS=$'61.81.244.156\n61.81.244.134\n127.0.0.1' +epprd_rg:cl_swap_IP_address[alias_replace_routes:191] netstat -rnC Routing tables Destination Gateway Flags Wt Policy If Cost Config_Cost Route tree for Protocol Family 2 (Internet): default 61.81.244.1 UG 1 - en0 0 0 61.81.244.0 61.81.244.156 UHSb 1 - en0 0 0 => 61.81.244/24 61.81.244.156 U 1 - en0 0 0 61.81.244.134 127.0.0.1 UGHS 1 - lo0 0 0 61.81.244.156 127.0.0.1 UGHS 1 - lo0 0 0 61.81.244.255 61.81.244.156 UHSb 1 - en0 0 0 127/8 127.0.0.1 U 1 - lo0 0 0 Route tree for Protocol Family 24 (Internet v6): ::1%1 ::1%1 UH 1 - lo0 0 0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:200] I=1 +epprd_rg:cl_swap_IP_address[alias_replace_routes:200] typeset 
-li I +epprd_rg:cl_swap_IP_address[alias_replace_routes:201] NXTSVC='' +epprd_rg:cl_swap_IP_address[alias_replace_routes:203] awk '$3 !~ "[Ll]ink" && $3 !~ ":" && ($1 == "en0" || $1 == "en0*") {print $4}' +epprd_rg:cl_swap_IP_address[alias_replace_routes:203] netstat -in +epprd_rg:cl_swap_IP_address[alias_replace_routes:203] IFADDRS=$'61.81.244.156\n61.81.244.134' +epprd_rg:cl_swap_IP_address[alias_replace_routes:204] cllsif -J '~' -Spi epprda +epprd_rg:cl_swap_IP_address[alias_replace_routes:204] grep '~net_ether_01~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:205] grep -E '~service~|~persistent~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:205] cut -d~ -f7 +epprd_rg:cl_swap_IP_address[alias_replace_routes:205] sort -u +epprd_rg:cl_swap_IP_address[alias_replace_routes:204] SVCADDRS=61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:210] awk '$1 !~ ":" {print $1}' +epprd_rg:cl_swap_IP_address[alias_replace_routes:210] echo 61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:210] SVCADDRS=61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:212] cllsif -J '~' -Spi epprda +epprd_rg:cl_swap_IP_address[alias_replace_routes:212] grep '~net_ether_01~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:213] grep -E '~persistent~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:214] cut -d~ -f7 +epprd_rg:cl_swap_IP_address[alias_replace_routes:212] PERSISTENT_IP='' +epprd_rg:cl_swap_IP_address[alias_replace_routes:215] routeaddr='' +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:223] [[ -z '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:225] routeaddr=61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:227] [[ 61.81.244.156 != 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:234] [[ -n '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.134 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:234] [[ -n '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:243] NXTADDR='' +epprd_rg:cl_swap_IP_address[alias_replace_routes:244] bootaddr='' +epprd_rg:cl_swap_IP_address[alias_replace_routes:245] [[ -z '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:247] cllsif -J '~' -Spi epprda +epprd_rg:cl_swap_IP_address[alias_replace_routes:247] grep '~net_ether_01~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:247] 
grep '~boot~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:247] awk -F~ '$9 == "en0" { print $7; }' +epprd_rg:cl_swap_IP_address[alias_replace_routes:247] bootaddr=61.81.244.134 +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] [[ 61.81.244.156 == 61.81.244.134 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] [[ 61.81.244.134 == 61.81.244.134 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] clgetnet 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] clgetnet 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] clgetnet 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] [[ 61.81.244.134 != 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:252] NXTADDR=61.81.244.134 +epprd_rg:cl_swap_IP_address[alias_replace_routes:253] break +epprd_rg:cl_swap_IP_address[alias_replace_routes:258] swaproute=0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:259] NETSTAT_FLAGS='-nrf inet' +epprd_rg:cl_swap_IP_address[alias_replace_routes:261] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:264] swaproute=1 +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] netstat -nrf inet +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] fgrep -w en0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] read DEST GW FLAGS OTHER +epprd_rg:cl_swap_IP_address[alias_replace_routes:268] LOOPBACK=127.0.0.1 +epprd_rg:cl_swap_IP_address[alias_replace_routes:336] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:336] clgetnet 61.81.244.1 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:336] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:338] [[ 0 == 0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:341] [[ -z release ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:341] [[ 61.81.244.156 == ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:346] print 'cl_route_change default 127.0.0.1 61.81.244.1 inet' +epprd_rg:cl_swap_IP_address[alias_replace_routes:346] 1>> /usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[alias_replace_routes:347] add_rc_check /usr/es/sbin/cluster/.restore_routes cl_route_change +epprd_rg:cl_swap_IP_address[add_rc_check:70] RR=/usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[add_rc_check:71] FUNC=cl_route_change +epprd_rg:cl_swap_IP_address[add_rc_check:73] cat +epprd_rg:cl_swap_IP_address[add_rc_check:73] 1>> /usr/es/sbin/cluster/.restore_routes 0<< \EOF rc=$? 
if [[ $rc != 0 ]] then echo "ERROR: cl_route_change failed with code $rc" cl_route_change_RC=$rc fi EOF +epprd_rg:cl_swap_IP_address[alias_replace_routes:350] cl_route_change default 61.81.244.1 127.0.0.1 inet +epprd_rg:cl_swap_IP_address[alias_replace_routes:351] RC=0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:352] : cl_route_change completed with 0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:353] I=I+1 +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] read DEST GW FLAGS OTHER +epprd_rg:cl_swap_IP_address[alias_replace_routes:268] LOOPBACK=127.0.0.2 +epprd_rg:cl_swap_IP_address[alias_replace_routes:290] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:292] [[ '' != '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:292] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:294] route delete -host 61.81.244.0 61.81.244.156 61.81.244.156 host 61.81.244.0: gateway 61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] read DEST GW FLAGS OTHER +epprd_rg:cl_swap_IP_address[alias_replace_routes:268] LOOPBACK=127.0.0.2 +epprd_rg:cl_swap_IP_address[alias_replace_routes:272] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:272] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:272] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:274] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:276] [[ '' != '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:276] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:279] route delete -net 61.81.244/24 61.81.244.156 61.81.244.156 net 61.81.244: gateway 61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] read DEST GW FLAGS OTHER +epprd_rg:cl_swap_IP_address[alias_replace_routes:268] LOOPBACK=127.0.0.2 +epprd_rg:cl_swap_IP_address[alias_replace_routes:290] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:292] [[ '' != '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:292] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:294] route delete -host 61.81.244.255 61.81.244.156 61.81.244.156 host 61.81.244.255: gateway 61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] read DEST GW FLAGS OTHER +epprd_rg:cl_swap_IP_address[alias_replace_routes:360] echo 'exit $cl_route_change_RC' +epprd_rg:cl_swap_IP_address[alias_replace_routes:360] 1>> /usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[alias_replace_routes:361] chmod +x /usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[alias_replace_routes:362] return 0 +epprd_rg:cl_swap_IP_address[605] RC=0 +epprd_rg:cl_swap_IP_address[606] : alias_replace_routes completed with 0 +epprd_rg:cl_swap_IP_address[609] clifconfig en0 delete 61.81.244.156 +epprd_rg:clifconfig[117] version=1.9 +epprd_rg:clifconfig[121] set -A args en0 delete 61.81.244.156 +epprd_rg:clifconfig[124] interface=en0 +epprd_rg:clifconfig[125] shift +epprd_rg:clifconfig[127] [[ -n delete ]] +epprd_rg:clifconfig[130] delete_val=1 +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n 61.81.244.156 ]] +epprd_rg:clifconfig[147] params=' address=61.81.244.156' +epprd_rg:clifconfig[147] addr=61.81.244.156 +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n '' ]] +epprd_rg:clifconfig[174] [[ -n 1 ]] 
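Deleting an aliased address on AIX silently removes any routes through it, so before touching en0 the script writes a one-shot .restore_routes script: for each route it is about to disturb it records a cl_route_change command, and add_rc_check appends a return-code capture after it. A compressed sketch of that generator, with the gateway taken from the trace and the error reporting folded into one line:

    RR=/usr/es/sbin/cluster/.restore_routes
    cp /dev/null $RR
    print '#!/bin/ksh' >> $RR
    # the default route the alias removal will destroy, and how to put it back
    print 'cl_route_change default 127.0.0.1 61.81.244.1 inet' >> $RR
    # add_rc_check: remember any failure of the command above
    print 'rc=$?' >> $RR
    print '[[ $rc != 0 ]] && cl_route_change_RC=$rc' >> $RR
    print 'exit $cl_route_change_RC' >> $RR
    chmod +x $RR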
+epprd_rg:clifconfig[174] [[ -n epprd_rg ]] +epprd_rg:clifconfig[175] clwparname epprd_rg +epprd_rg:clwparname[38] version=1.3.1.1 +epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clwparname[44] [[ -z '' ]] +epprd_rg:clwparname[44] exit 0 +epprd_rg:clifconfig[175] WPARNAME='' +epprd_rg:clifconfig[176] (( 0 == 0 )) +epprd_rg:clifconfig[176] [[ -n '' ]] +epprd_rg:clifconfig[218] belongs_to_an_active_wpar 61.81.244.156 +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]] +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1 +epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast +epprd_rg:clifconfig[218] IFS='~' +epprd_rg:clifconfig[219] rc=1 +epprd_rg:clifconfig[221] [[ 1 == 0 ]] +epprd_rg:clifconfig[275] ifconfig en0 delete 61.81.244.156 +epprd_rg:cl_swap_IP_address[611] [[ 1 == 1 ]] +epprd_rg:cl_swap_IP_address[613] [[ -n '' ]] +epprd_rg:cl_swap_IP_address[662] [[ -n 61.81.244.134 ]] +epprd_rg:cl_swap_IP_address[671] (( 720005 <= 710003 )) +epprd_rg:cl_swap_IP_address[675] clifconfig en0 alias 61.81.244.134 netmask 255.255.255.0 +epprd_rg:clifconfig[117] version=1.9 +epprd_rg:clifconfig[121] set -A args en0 alias 61.81.244.134 netmask 255.255.255.0 +epprd_rg:clifconfig[124] interface=en0 +epprd_rg:clifconfig[125] shift +epprd_rg:clifconfig[127] [[ -n alias ]] +epprd_rg:clifconfig[129] alias_val=1 +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n 61.81.244.134 ]] +epprd_rg:clifconfig[147] params=' address=61.81.244.134' +epprd_rg:clifconfig[147] addr=61.81.244.134 +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n netmask ]] +epprd_rg:clifconfig[149] params=' address=61.81.244.134 netmask=255.255.255.0' +epprd_rg:clifconfig[149] shift +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n '' ]] +epprd_rg:clifconfig[174] [[ -n 1 ]] +epprd_rg:clifconfig[174] [[ -n epprd_rg ]] +epprd_rg:clifconfig[175] clwparname epprd_rg +epprd_rg:clwparname[38] version=1.3.1.1 +epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clwparname[44] [[ -z '' ]] +epprd_rg:clwparname[44] exit 0 +epprd_rg:clifconfig[175] WPARNAME='' +epprd_rg:clifconfig[176] (( 0 == 0 )) +epprd_rg:clifconfig[176] [[ -n '' ]] +epprd_rg:clifconfig[218] belongs_to_an_active_wpar 61.81.244.134 +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]] +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1 +epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast +epprd_rg:clifconfig[218] IFS='~' +epprd_rg:clifconfig[219] rc=1 +epprd_rg:clifconfig[221] [[ 1 == 0 ]] +epprd_rg:clifconfig[275] ifconfig en0 alias 61.81.244.134 netmask 255.255.255.0 +epprd_rg:cl_swap_IP_address[679] /usr/es/sbin/cluster/.restore_routes +epprd_rg:.restore_routes[+9] date +epprd_rg:.restore_routes[+9] : Starting /usr/es/sbin/cluster/.restore_routes at Sat Jan 28 18:05:38 KORST 2023 +epprd_rg:.restore_routes[+11] cl_route_change default 127.0.0.1 61.81.244.1 inet +epprd_rg:.restore_routes[+12] rc=0 +epprd_rg:.restore_routes[+13] [[ 0 != 0 ]] +epprd_rg:.restore_routes[+19] exit +epprd_rg:cl_swap_IP_address[680] [[ 0 == 0 ]] +epprd_rg:cl_swap_IP_address[680] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[681] : Completed /usr/es/sbin/cluster/.restore_routes with return code 0 
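The actual address swap is three moves bracketed by the bookkeeping above: drop the service alias, make sure the boot address is up, then run the saved .restore_routes to repair whatever the delete took with it. Stripped of the clifconfig/WPAR indirection, the sequence is:

    ifconfig en0 delete 61.81.244.156                        # remove the service alias
    ifconfig en0 alias 61.81.244.134 netmask 255.255.255.0   # re-add the boot address
    /usr/es/sbin/cluster/.restore_routes                     # put back routes lost with the alias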
+epprd_rg:cl_swap_IP_address[682] enable_pmtu_gated Setting tcp_pmtu_discover to 1 Setting udp_pmtu_discover to 1 +epprd_rg:cl_swap_IP_address[685] hats_adapter_notify en0 -d 61.81.244.156 alias 2023-01-28T18:05:38.374858 hats_adapter_notify 2023-01-28T18:05:38.375786 hats_adapter_notify +epprd_rg:cl_swap_IP_address[688] check_alias_status en0 61.81.244.156 release +epprd_rg:cl_swap_IP_address[check_alias_status:108] CH_INTERFACE=en0 +epprd_rg:cl_swap_IP_address[check_alias_status:109] CH_ADDRESS=61.81.244.156 +epprd_rg:cl_swap_IP_address[check_alias_status:110] CH_ACQ_OR_RLSE=release +epprd_rg:cl_swap_IP_address[check_alias_status:118] IF_IB=en0 +epprd_rg:cl_swap_IP_address[check_alias_status:120] awk '{print index($0, "ib")}' +epprd_rg:cl_swap_IP_address[check_alias_status:120] echo en0 +epprd_rg:cl_swap_IP_address[check_alias_status:120] IS_IB=0 +epprd_rg:cl_swap_IP_address[check_alias_status:122] [[ 0 != 1 ]] +epprd_rg:cl_swap_IP_address[check_alias_status:124] clifconfig en0 +epprd_rg:cl_swap_IP_address[check_alias_status:124] fgrep -w 61.81.244.156 +epprd_rg:cl_swap_IP_address[check_alias_status:124] awk '{print $2}' +epprd_rg:clifconfig[117] version=1.9 +epprd_rg:clifconfig[121] set -A args en0 +epprd_rg:clifconfig[124] interface=en0 +epprd_rg:clifconfig[125] shift +epprd_rg:clifconfig[127] [[ -n '' ]] +epprd_rg:clifconfig[174] [[ -n '' ]] +epprd_rg:clifconfig[218] belongs_to_an_active_wpar +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]] +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1 +epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast +epprd_rg:clifconfig[218] IFS='~' +epprd_rg:clifconfig[219] rc=1 +epprd_rg:clifconfig[221] [[ 1 == 0 ]] +epprd_rg:clifconfig[275] ifconfig en0 +epprd_rg:cl_swap_IP_address[check_alias_status:124] ADDR='' +epprd_rg:cl_swap_IP_address[check_alias_status:129] [ release = acquire ] +epprd_rg:cl_swap_IP_address[check_alias_status:139] [[ '' == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[check_alias_status:144] return 0 +epprd_rg:cl_swap_IP_address[689] RC1=0 +epprd_rg:cl_swap_IP_address[690] [[ 0 == 0 ]] +epprd_rg:cl_swap_IP_address[690] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[693] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[697] amlog_trace '' 'Deliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_swap_IP_address[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_swap_IP_address[amlog_trace:319] cltime +epprd_rg:cl_swap_IP_address[amlog_trace:319] DATE=2023-01-28T18:05:38.429705 +epprd_rg:cl_swap_IP_address[amlog_trace:320] echo '|2023-01-28T18:05:38.429705|INFO: Deliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_swap_IP_address[701] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[714] flush_arp +epprd_rg:cl_swap_IP_address[flush_arp:49] arp -an +epprd_rg:cl_swap_IP_address[flush_arp:49] grep '\?' 
+epprd_rg:cl_swap_IP_address[flush_arp:49] tr -d '()'
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.27
61.81.244.27 (61.81.244.27) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.220
61.81.244.220 (61.81.244.220) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.239
61.81.244.239 (61.81.244.239) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.123
61.81.244.123 (61.81.244.123) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.146
61.81.244.146 (61.81.244.146) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.1
61.81.244.1 (61.81.244.1) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:52] return 0
+epprd_rg:cl_swap_IP_address[716] netstat -in
Name  Mtu   Network     Address            Ipkts     Ierrs Opkts     Oerrs Coll
en0   1500  link#2      fa.e6.13.4e.a9.20  183746058 0     60760197  0     0
en0   1500  61.81.244   61.81.244.134      183746058 0     60760197  0     0
lo0   16896 link#1                         34272016  0     34272016  0     0
lo0   16896 127         127.0.0.1          34272016  0     34272016  0     0
lo0   16896 ::1%1                          34272016  0     34272016  0     0
+epprd_rg:cl_swap_IP_address[717] netstat -rnC
Routing tables
Destination      Gateway        Flags  Wt  Policy  If   Cost  Config_Cost

Route tree for Protocol Family 2 (Internet):
default          61.81.244.1    UG     1   -       en0  0     0
61.81.244.0      61.81.244.134  UHSb   1   -       en0  0     0  =>
61.81.244/24     61.81.244.134  U      1   -       en0  0     0
61.81.244.134    127.0.0.1      UGHS   1   -       lo0  0     0
61.81.244.255    61.81.244.134  UHSb   1   -       en0  0     0
127/8            127.0.0.1      U      1   -       lo0  0     0

Route tree for Protocol Family 24 (Internet v6):
::1%1            ::1%1          UH     1   -       lo0  0     0
+epprd_rg:cl_swap_IP_address[989] no -o ipignoreredirects=0
Setting ipignoreredirects to 0
+epprd_rg:cl_swap_IP_address[992] cl_echo 32 'Completed execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0. Exit status = 0' /usr/es/sbin/cluster/events/utils/cl_swap_IP_address 'rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0' 0
Jan 28 2023 18:05:38 Completed execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0.
Exit status = 0
+epprd_rg:cl_swap_IP_address[994] date
Sat Jan 28 18:05:38 KORST 2023
+epprd_rg:cl_swap_IP_address[996] exit 0
+epprd_rg:release_service_addr[225] RC=0
+epprd_rg:release_service_addr[227] [[ 0 != 0 ]]
+epprd_rg:release_service_addr[245] cl_RMupdate resource_down All_nonerror_service_addrs release_service_addr
2023-01-28T18:05:38.503800
2023-01-28T18:05:38.508210
+epprd_rg:release_service_addr[249] [[ UNDEFINED != UNDEFINED ]]
+epprd_rg:release_service_addr[252] NSORDER=''
+epprd_rg:release_service_addr[252] export NSORDER
+epprd_rg:release_service_addr[255] exit 0
Jan 28 2023 18:05:38 EVENT COMPLETED: release_service_addr 0
|2023-01-28T18:05:38|1583|EVENT COMPLETED: release_service_addr 0|
+epprd_rg:process_resources[release_service_labels:3129] RC=0
+epprd_rg:process_resources[release_service_labels:3131] (( 0 != 0 && 0 != 11 ))
+epprd_rg:process_resources[release_service_labels:3146] (( 0 != 0 ))
+epprd_rg:process_resources[release_service_labels:3152] refresh -s clcomd
0513-095 The request for subsystem refresh was completed successfully.
+epprd_rg:process_resources[release_service_labels:3154] return 0
+epprd_rg:process_resources[3412] RC=0
+epprd_rg:process_resources[3413] (( 0 != 0 ))
+epprd_rg:process_resources[3324] true
+epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next
+epprd_rg:process_resources[3328] set -a
+epprd_rg:process_resources[3329] clRGPA
+epprd_rg:clRGPA[+47] [[ high = high ]]
+epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$
+epprd_rg:clRGPA[+49] usingVer=clrgpa
+epprd_rg:clRGPA[+54] clrgpa
2023-01-28T18:05:40.614629 clrgpa
+epprd_rg:clRGPA[+55] exit 0
+epprd_rg:process_resources[3329] eval JOB_TYPE=WPAR ACTION=RELEASE RESOURCE_GROUPS='"epprd_rg' '"'
+epprd_rg:process_resources[1] JOB_TYPE=WPAR
+epprd_rg:process_resources[1] ACTION=RELEASE
+epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg '
+epprd_rg:process_resources[3330] RC=0
+epprd_rg:process_resources[3331] set +a
+epprd_rg:process_resources[3333] (( 0 != 0 ))
+epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg
+epprd_rg:process_resources[3343] GROUPNAME=epprd_rg
+epprd_rg:process_resources[3343] export GROUPNAME
+epprd_rg:process_resources[3353] IS_SERVICE_START=1
+epprd_rg:process_resources[3354] IS_SERVICE_STOP=1
+epprd_rg:process_resources[3360] [[ WPAR == RELEASE ]]
+epprd_rg:process_resources[3360] [[ WPAR == ONLINE ]]
+epprd_rg:process_resources[3492] process_wpars RELEASE
+epprd_rg:process_resources[process_wpars:3265] PS4_FUNC=process_wpars
+epprd_rg:process_resources[process_wpars:3265] typeset PS4_FUNC
+epprd_rg:process_resources[process_wpars:3266] [[ high == high ]]
+epprd_rg:process_resources[process_wpars:3266] set -x
+epprd_rg:process_resources[process_wpars:3267] STAT=0
+epprd_rg:process_resources[process_wpars:3268] action=RELEASE
+epprd_rg:process_resources[process_wpars:3268] typeset action
+epprd_rg:process_resources[process_wpars:3272] export GROUPNAME
+epprd_rg:process_resources[process_wpars:3280] clstop_wpar
+epprd_rg:clstop_wpar[42] version=1.7
+epprd_rg:clstop_wpar[46] [[ rg_move == reconfig_resource_release ]]
+epprd_rg:clstop_wpar[46] [[ RELEASE_PRIMARY == reconfig_resource_release ]]
+epprd_rg:clstop_wpar[55] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource
+epprd_rg:clstop_wpar[55] [[ -z '' ]]
+epprd_rg:clstop_wpar[55] exit 0
+epprd_rg:process_resources[process_wpars:3281] RC=0
+epprd_rg:process_resources[process_wpars:3285] (( 0 != 0 && 0 != 11 ))
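The flush_arp loop traced above (cl_swap_IP_address[714]) clears every dynamic ARP entry so that peers re-learn the MAC address behind the moved service IP. Condensed, it is just:

    # Condensed sketch of flush_arp: under 'arp -an' unresolved names print
    # as '?', so the grep selects every entry; tr strips the parentheses
    # around the address before it is deleted.
    arp -an | grep '\?' | tr -d '()' | while read host addr other
    do
        arp -d $addr
    done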
+epprd_rg:process_resources[process_wpars:3294] return 0 +epprd_rg:process_resources[3493] RC=0 +epprd_rg:process_resources[3495] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3497] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:05:40.655629 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=OFFLINE RESOURCE_GROUPS='"epprd_rg"' +epprd_rg:process_resources[1] JOB_TYPE=OFFLINE +epprd_rg:process_resources[1] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ OFFLINE == RELEASE ]] +epprd_rg:process_resources[3360] [[ OFFLINE == ONLINE ]] +epprd_rg:process_resources[3681] set_resource_group_state DOWN +epprd_rg:process_resources[set_resource_group_state:82] PS4_FUNC=set_resource_group_state +epprd_rg:process_resources[set_resource_group_state:82] typeset PS4_FUNC +epprd_rg:process_resources[set_resource_group_state:83] [[ high == high ]] +epprd_rg:process_resources[set_resource_group_state:83] set -x +epprd_rg:process_resources[set_resource_group_state:84] STAT=0 +epprd_rg:process_resources[set_resource_group_state:85] new_status=DOWN +epprd_rg:process_resources[set_resource_group_state:89] export GROUPNAME +epprd_rg:process_resources[set_resource_group_state:90] [[ DOWN != DOWN ]] +epprd_rg:process_resources[set_resource_group_state:100] : Resource Manager Updates +epprd_rg:process_resources[set_resource_group_state:122] cl_RMupdate rg_down epprd_rg process_resources 2023-01-28T18:05:40.697386 2023-01-28T18:05:40.701691 +epprd_rg:process_resources[set_resource_group_state:124] amlog_trace '' 'acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:318] clcycle clavailability.log +epprd_rg:process_resources[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:process_resources[amlog_trace:319] cltime +epprd_rg:process_resources[amlog_trace:319] DATE=2023-01-28T18:05:40.740559 +epprd_rg:process_resources[amlog_trace:320] echo '|2023-01-28T18:05:40.740559|INFO: acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:process_resources[set_resource_group_state:153] return 0 +epprd_rg:process_resources[3682] RC=0 +epprd_rg:process_resources[3683] postvg_for_rdisk +epprd_rg:process_resources[postvg_for_rdisk:856] PS4_FUNC=postvg_for_rdisk +epprd_rg:process_resources[postvg_for_rdisk:856] typeset PS4_FUNC +epprd_rg:process_resources[postvg_for_rdisk:857] [[ high == high ]] +epprd_rg:process_resources[postvg_for_rdisk:857] set -x +epprd_rg:process_resources[postvg_for_rdisk:858] STAT=0 +epprd_rg:process_resources[postvg_for_rdisk:859] FAILURE_IN_METHOD=0 +epprd_rg:process_resources[postvg_for_rdisk:859] typeset -li FAILURE_IN_METHOD +epprd_rg:process_resources[postvg_for_rdisk:860] 
LIST_OF_FAILED_RGS='' +epprd_rg:process_resources[postvg_for_rdisk:861] RG_LIST=epprd_rg +epprd_rg:process_resources[postvg_for_rdisk:862] RDISK_LIST='' +epprd_rg:process_resources[postvg_for_rdisk:863] DISK_LIST='' +epprd_rg:process_resources[postvg_for_rdisk:866] : Resource groups are processed individually. This is required because +epprd_rg:process_resources[postvg_for_rdisk:867] : the replication mechanism may differ between resource groups. +epprd_rg:process_resources[postvg_for_rdisk:871] getReplicatedResources epprd_rg +epprd_rg:process_resources[getReplicatedResources:699] PS4_FUNC=getReplicatedResources +epprd_rg:process_resources[getReplicatedResources:699] typeset PS4_FUNC +epprd_rg:process_resources[getReplicatedResources:700] [[ high == high ]] +epprd_rg:process_resources[getReplicatedResources:700] set -x +epprd_rg:process_resources[getReplicatedResources:702] RV=false +epprd_rg:process_resources[getReplicatedResources:704] clodmget -n -f type HACMPrresmethods +epprd_rg:process_resources[getReplicatedResources:704] [[ -n 9 ]] +epprd_rg:process_resources[getReplicatedResources:707] : Replicated resource methods are defined, check for resources +epprd_rg:process_resources[getReplicatedResources:709] clodmget -q $'name like \'*_REP_RESOURCE\' AND group=epprd_rg' -f value -n HACMPresource +epprd_rg:process_resources[getReplicatedResources:709] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:718] : Verify if any backup profiles are configured and trigger cbm utilities based on that +epprd_rg:process_resources[getReplicatedResources:720] clodmget -q name=BACKUP_ENABLED -f value HACMPresource +epprd_rg:process_resources[getReplicatedResources:720] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:739] echo false +epprd_rg:process_resources[postvg_for_rdisk:871] REPLICATED_RESOURCES=false +epprd_rg:process_resources[postvg_for_rdisk:873] [[ false == true ]] +epprd_rg:process_resources[postvg_for_rdisk:946] return 0 +epprd_rg:process_resources[3684] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:05:40.770375 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=NONE +epprd_rg:process_resources[1] JOB_TYPE=NONE +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ NONE == RELEASE ]] +epprd_rg:process_resources[3360] [[ NONE == ONLINE ]] +epprd_rg:process_resources[3729] break +epprd_rg:process_resources[3740] : If sddsrv was turned off above, turn it back on again +epprd_rg:process_resources[3742] [[ FALSE == TRUE ]] +epprd_rg:process_resources[3747] exit 0 :rg_move[247] : unsetting AM_SYNC_CALLED_BY from $'callers environment as\n: we dont' require it after this point in execution. 
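The process_resources main loop seen above is driven entirely by clRGPA: each call prints shell assignments (JOB_TYPE, ACTION, RESOURCE_GROUPS, ...) that are eval'ed under set -a, and the loop ends when JOB_TYPE=NONE comes back. A schematic sketch of that dispatch, limited to the job types visible in this log:

    # Schematic of the clRGPA-driven dispatch loop in process_resources.
    while true
    do
        set -a                # auto-export whatever clRGPA assigns
        eval $(clRGPA)        # e.g. JOB_TYPE=OFFLINE RESOURCE_GROUPS="epprd_rg"
        set +a
        case $JOB_TYPE in
            NONE)    break ;;                         # nothing left to do
            WPAR)    process_wpars $ACTION ;;         # traced above
            OFFLINE) set_resource_group_state DOWN ;; # traced above
        esac
    done

The same set -a / eval pattern shows up again below when rg_move_fence imports the clsetenvgrp output.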
:rg_move[250] unset AM_SYNC_CALLED_BY :rg_move[253] [[ -f /tmp/.NFSSTOPPED ]] :rg_move[274] [[ -f /tmp/.RPCLOCKDSTOPPED ]] :rg_move[293] exit 0 Jan 28 2023 18:05:40 EVENT COMPLETED: rg_move epprda 1 RELEASE 0 |2023-01-28T18:05:40|1583|EVENT COMPLETED: rg_move epprda 1 RELEASE 0| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T18:05:40.869965 :clevlog[amlog_trace:320] echo '|2023-01-28T18:05:40.869965|INFO: rg_move|epprd_rg|epprda|1|RELEASE|0' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :rg_move_release[+68] exit 0 Jan 28 2023 18:05:40 EVENT COMPLETED: rg_move_release epprda 1 0 |2023-01-28T18:05:40|1583|EVENT COMPLETED: rg_move_release epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:05:40.995859 + echo '|2023-01-28T18:05:40.995859|INFO: rg_move_release|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 18:05:41 EVENT START: rg_move_fence epprda 1 |2023-01-28T18:05:41|1583|EVENT START: rg_move_fence epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:05:41.187709 + echo '|2023-01-28T18:05:41.187709|INFO: rg_move_fence|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_fence[62] [[ high == high ]] :rg_move_fence[62] version=1.11 :rg_move_fence[63] NODENAME=epprda :rg_move_fence[63] export NODENAME :rg_move_fence[65] set -u :rg_move_fence[67] [ 2 != 2 ] :rg_move_fence[73] set +u :rg_move_fence[75] [[ -z TRUE ]] :rg_move_fence[80] [[ TRUE == TRUE ]] :rg_move_fence[82] LOCAL_NODENAME=epprda :rg_move_fence[83] odmget -qid=1 HACMPgroup :rg_move_fence[83] egrep 'group =' :rg_move_fence[83] awk '{print $3}' :rg_move_fence[83] eval RGNAME='"epprd_rg"' :rg_move_fence[1] RGNAME=epprd_rg +epprd_rg:rg_move_fence[84] GROUPNAME=epprd_rg +epprd_rg:rg_move_fence[85] group_state='$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[86] set +u +epprd_rg:rg_move_fence[87] eval print '$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[1] print ONLINE +epprd_rg:rg_move_fence[87] RG_MOVE_ONLINE=ONLINE +epprd_rg:rg_move_fence[87] export RG_MOVE_ONLINE +epprd_rg:rg_move_fence[88] set -u +epprd_rg:rg_move_fence[89] RG_MOVE_ONLINE=ONLINE +epprd_rg:rg_move_fence[91] set -a +epprd_rg:rg_move_fence[92] clsetenvgrp epprda rg_move epprd_rg '' :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_fence[92] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[93] RC=0 +epprd_rg:rg_move_fence[94] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" 
SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n '
+epprd_rg:rg_move_fence[1] FORCEDOWN_GROUPS=''
+epprd_rg:rg_move_fence[2] RESOURCE_GROUPS=''
+epprd_rg:rg_move_fence[3] HOMELESS_GROUPS=''
+epprd_rg:rg_move_fence[4] HOMELESS_FOLLOWER_GROUPS=''
+epprd_rg:rg_move_fence[5] ERRSTATE_GROUPS=''
+epprd_rg:rg_move_fence[6] PRINCIPAL_ACTIONS=''
+epprd_rg:rg_move_fence[7] ASSOCIATE_ACTIONS=''
+epprd_rg:rg_move_fence[8] AUXILLIARY_ACTIONS=''
+epprd_rg:rg_move_fence[8] SIBLING_GROUPS=''
+epprd_rg:rg_move_fence[9] SIBLING_NODES_BY_GROUP=''
+epprd_rg:rg_move_fence[10] SIBLING_ACQUIRING_GROUPS=''
+epprd_rg:rg_move_fence[11] SIBLING_ACQUIRING_NODES_BY_GROUP=''
+epprd_rg:rg_move_fence[12] SIBLING_RELEASING_GROUPS=''
+epprd_rg:rg_move_fence[13] SIBLING_RELEASING_NODES_BY_GROUP=''
+epprd_rg:rg_move_fence[95] set +a
+epprd_rg:rg_move_fence[96] [ 0 -ne 0 ]
+epprd_rg:rg_move_fence[103] process_resources FENCE
:rg_move_fence[3318] version=1.169
:rg_move_fence[3321] STATUS=0
:rg_move_fence[3322] sddsrv_off=FALSE
:rg_move_fence[3324] true
:rg_move_fence[3326] : call rgpa, and it will tell us what to do next
:rg_move_fence[3328] set -a
:rg_move_fence[3329] clRGPA FENCE
:clRGPA[+47] [[ high = high ]]
:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$
:clRGPA[+49] usingVer=clrgpa
:clRGPA[+54] clrgpa FENCE
2023-01-28T18:05:41.295996 clrgpa
:clRGPA[+55] exit 0
:rg_move_fence[3329] eval JOB_TYPE=NONE
:rg_move_fence[1] JOB_TYPE=NONE
:rg_move_fence[3330] RC=0
:rg_move_fence[3331] set +a
:rg_move_fence[3333] (( 0 != 0 ))
:rg_move_fence[3342] RESOURCE_GROUPS=''
:rg_move_fence[3343] GROUPNAME=''
:rg_move_fence[3343] export GROUPNAME
:rg_move_fence[3353] IS_SERVICE_START=1
:rg_move_fence[3354] IS_SERVICE_STOP=1
:rg_move_fence[3360] [[ NONE == RELEASE ]]
:rg_move_fence[3360] [[ NONE == ONLINE ]]
:rg_move_fence[3729] break
:rg_move_fence[3740] : If sddsrv was turned off above, turn it back on again
:rg_move_fence[3742] [[ FALSE == TRUE ]]
:rg_move_fence[3747] exit 0
+epprd_rg:rg_move_fence[104] : exit status of process_resources FENCE is: 0
+epprd_rg:rg_move_fence[107] [[ TRUE == TRUE ]]
+epprd_rg:rg_move_fence[109] export EVENT_TYPE
+epprd_rg:rg_move_fence[110] echo RELEASE_PRIMARY
RELEASE_PRIMARY
+epprd_rg:rg_move_fence[111] [[ -n '' ]]
+epprd_rg:rg_move_fence[141] exit 0
Jan 28 2023 18:05:41 EVENT COMPLETED: rg_move_fence epprda 1 0
|2023-01-28T18:05:41|1583|EVENT COMPLETED: rg_move_fence epprda 1 0|
+ clcycle clavailability.log
+ 1> /dev/null 2>& 1
+ cltime
+ DATE=2023-01-28T18:05:41.392724
+ echo '|2023-01-28T18:05:41.392724|INFO: rg_move_fence|epprd_rg|epprda|1|0'
+ 1>> /var/hacmp/availability/clavailability.log

PowerHA SystemMirror Event Summary
----------------------------------------------------------------------------
Serial number for this event: 1583
Event: TE_RG_MOVE_RELEASE
Start time: Sat Jan 28 18:05:27 2023
End time: Sat Jan 28 18:05:41 2023
Action:                                       Resource:  Script Name:
----------------------------------------------------------------------------
Releasing resource group: epprd_rg            process_resources
Search on: Sat.Jan.28.18:05:28.KORST.2023.process_resources.epprd_rg.ref
Releasing resource: All_servers               stop_server
Search on: Sat.Jan.28.18:05:28.KORST.2023.stop_server.All_servers.epprd_rg.ref
Resource offline: All_nonerror_servers        stop_server
Search on: Sat.Jan.28.18:05:28.KORST.2023.stop_server.All_nonerror_servers.epprd_rg.ref
Releasing resource: All_nfs_mounts            cl_deactivate_nfs
Search on: Sat.Jan.28.18:05:29.KORST.2023.cl_deactivate_nfs.All_nfs_mounts.epprd_rg.ref
Resource offline: All_nonerror_nfs_mounts     cl_deactivate_nfs
Search on: Sat.Jan.28.18:05:33.KORST.2023.cl_deactivate_nfs.All_nonerror_nfs_mounts.epprd_rg.ref
Releasing resource: All_exports               cl_unexport_fs
Search on: Sat.Jan.28.18:05:33.KORST.2023.cl_unexport_fs.All_exports.epprd_rg.ref
Resource offline: All_nonerror_exports        cl_unexport_fs
Search on: Sat.Jan.28.18:05:33.KORST.2023.cl_unexport_fs.All_nonerror_exports.epprd_rg.ref
Releasing resource: All_filesystems           cl_deactivate_fs
Search on: Sat.Jan.28.18:05:34.KORST.2023.cl_deactivate_fs.All_filesystems.epprd_rg.ref
Resource offline: All_non_error_filesystems   cl_deactivate_fs
Search on: Sat.Jan.28.18:05:36.KORST.2023.cl_deactivate_fs.All_non_error_filesystems.epprd_rg.ref
Releasing resource: All_volume_groups         cl_deactivate_vgs
Search on: Sat.Jan.28.18:05:36.KORST.2023.cl_deactivate_vgs.All_volume_groups.epprd_rg.ref
Resource offline: All_nonerror_volume_groups  cl_deactivate_vgs
Search on: Sat.Jan.28.18:05:37.KORST.2023.cl_deactivate_vgs.All_nonerror_volume_groups.epprd_rg.ref
Releasing resource: All_service_addrs         release_service_addr
Search on: Sat.Jan.28.18:05:37.KORST.2023.release_service_addr.All_service_addrs.epprd_rg.ref
Resource offline: All_nonerror_service_addrs  release_service_addr
Search on: Sat.Jan.28.18:05:38.KORST.2023.release_service_addr.All_nonerror_service_addrs.epprd_rg.ref
Resource group offline: epprd_rg              process_resources
Search on: Sat.Jan.28.18:05:40.KORST.2023.process_resources.epprd_rg.ref
----------------------------------------------------------------------------
|EVENT_SUMMARY_START|TE_RG_MOVE_RELEASE|2023-01-28T18:05:27|2023-01-28T18:05:41|1583|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:05:28.KORST.2023.process_resources.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:05:28.KORST.2023.stop_server.All_servers.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:05:28.KORST.2023.stop_server.All_nonerror_servers.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:05:29.KORST.2023.cl_deactivate_nfs.All_nfs_mounts.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:05:33.KORST.2023.cl_deactivate_nfs.All_nonerror_nfs_mounts.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:05:33.KORST.2023.cl_unexport_fs.All_exports.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:05:33.KORST.2023.cl_unexport_fs.All_nonerror_exports.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:05:34.KORST.2023.cl_deactivate_fs.All_filesystems.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:05:36.KORST.2023.cl_deactivate_fs.All_non_error_filesystems.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:05:36.KORST.2023.cl_deactivate_vgs.All_volume_groups.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:05:37.KORST.2023.cl_deactivate_vgs.All_nonerror_volume_groups.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:05:37.KORST.2023.release_service_addr.All_service_addrs.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:05:38.KORST.2023.release_service_addr.All_nonerror_service_addrs.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:05:40.KORST.2023.process_resources.epprd_rg.ref.ref|
|EVENT_SUMMARY_END|

PowerHA SystemMirror Event Preamble
----------------------------------------------------------------------------
Serial number for this event: 1591
No resource state change initiated by the cluster manager as a result of this event
---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_JOIN_NETWORK|2023-01-28T18:05:42|1591| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Jan 28 2023 18:05:42 EVENT START: network_up epprda net_ether_01 |2023-01-28T18:05:42|1591|EVENT START: network_up epprda net_ether_01| :network_up[+66] version=%I% :network_up[+69] set -a :network_up[+70] cllsparam -n epprda :network_up[+70] eval NODE_NAME=epprda VERBOSE_LOGGING=high PS4='${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] ' DEBUG_LEVEL=Standard LC_ALL='C' :network_up[+70] NODE_NAME=epprda VERBOSE_LOGGING=high PS4=${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] DEBUG_LEVEL=Standard LC_ALL=C :network_up[+71] set +a :network_up[+73] STATUS=0 :network_up[+75] [ 2 -ne 2 ] :network_up[+81] [[ epprda == epprda ]] :network_up[+82] amlog_trace 1591|epprda|net_ether_01 :network_up[+61] clcycle clavailability.log :network_up[+61] 1> /dev/null 2>& 1 :network_up[+61] :network_up[+61] cltime DATE=2023-01-28T18:05:43.007262 :network_up[+61] echo |2023-01-28T18:05:43.007262|INFO: 1591|epprda|net_ether_01 :network_up[+61] 1>> /var/hacmp/availability/clavailability.log :network_up[+84] export NETWORKNAME=net_ether_01 :network_up[+89] [[ epprda == epprda ]] :network_up[+90] amlog_trace 1591|epprda|net_ether_01 :network_up[+61] clcycle clavailability.log :network_up[+61] 1> /dev/null 2>& 1 :network_up[+61] :network_up[+61] cltime DATE=2023-01-28T18:05:43.034814 :network_up[+61] echo |2023-01-28T18:05:43.034814|INFO: 1591|epprda|net_ether_01 :network_up[+61] 1>> /var/hacmp/availability/clavailability.log :network_up[+92] exit 0 Jan 28 2023 18:05:43 EVENT COMPLETED: network_up epprda net_ether_01 0 |2023-01-28T18:05:43|1591|EVENT COMPLETED: network_up epprda net_ether_01 0| Jan 28 2023 18:05:43 EVENT START: network_up_complete epprda net_ether_01 |2023-01-28T18:05:43|1591|EVENT START: network_up_complete epprda net_ether_01| :network_up_complete[+68] version=%I% :network_up_complete[+72] [ 2 -ne 2 ] :network_up_complete[+78] [[ epprda == epprda ]] :network_up_complete[+79] amlog_trace 1591|epprda|net_ether_01 :network_up_complete[+61] clcycle clavailability.log :network_up_complete[+61] 1> /dev/null 2>& 1 :network_up_complete[+61] :network_up_complete[+61] cltime DATE=2023-01-28T18:05:43.297862 :network_up_complete[+61] echo |2023-01-28T18:05:43.297862|INFO: 1591|epprda|net_ether_01 :network_up_complete[+61] 1>> /var/hacmp/availability/clavailability.log :network_up_complete[+82] NODENAME=epprda :network_up_complete[+83] NETWORK=net_ether_01 :network_up_complete[+84] export NETWORKNAME=net_ether_01 :network_up_complete[+86] [[ -z ]] :network_up_complete[+88] EMULATE=REAL :network_up_complete[+90] set -u :network_up_complete[+96] STATUS=0 :network_up_complete[+100] odmget HACMPnode :network_up_complete[+100] grep name = :network_up_complete[+100] sort :network_up_complete[+100] uniq :network_up_complete[+100] wc -l :network_up_complete[+100] [ 2 -eq 2 ] :network_up_complete[+102] :network_up_complete[+102] odmget HACMPgroup :network_up_complete[+102] grep group = :network_up_complete[+102] awk {print $3} :network_up_complete[+102] sed s/"//g RESOURCE_GROUPS=epprd_rg :network_up_complete[+106] :network_up_complete[+106] odmget -q group=epprd_rg AND name=EXPORT_FILESYSTEM 
HACMPresource :network_up_complete[+106] grep value :network_up_complete[+106] sed s/"//g :network_up_complete[+106] awk {print $3} EXPORTLIST=/board_org /sapmnt/EPP :network_up_complete[+107] [ -n /board_org /sapmnt/EPP ] :network_up_complete[+109] [ REAL = EMUL ] :network_up_complete[+114] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+189] TWIN_NAME=epprds :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+196] rpcinfo -p :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw status :cl_update_statd(0)[+207] : Get the current twin, if there is one :cl_update_statd(0)[+209] :cl_update_statd(0)[+209] nfso -H sm_gethost :cl_update_statd(0)[+209] 2>& 1 CURTWIN=epprds :cl_update_statd(0)[+210] RC=0 :cl_update_statd(0)[+212] [[ -z true ]] :cl_update_statd(0)[+212] [[ -z epprds ]] :cl_update_statd(0)[+225] : Get the interface to the twin node :cl_update_statd(0)[+227] :cl_update_statd(0)[+227] get_node_ip epprds :cl_update_statd(0)[+9] (( 1 != 1 )) :cl_update_statd(0)[+15] Twin_Name=epprds :cl_update_statd(0)[+16] NewTwin= :cl_update_statd(0)[+19] : Get the Interface details for every interface on the twin node :cl_update_statd(0)[+20] : Reject interfaces on nodes that are not public boot addresses :cl_update_statd(0)[+21] : because those are the only ones we have state information for :cl_update_statd(0)[+23] :cl_update_statd(0)[+23] cllsif -J ~ -Sw -i epprda :cl_update_statd(0)[+23] LC_ALL=C LOCAL_NETWORK_INFO=epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+24] cllsif -J ~ -Sw -i epprds :cl_update_statd(0)[+24] LC_ALL=C :cl_update_statd(0)[+25] read adapt type network net_type attrib node ip_addr skip interface skip netmask skip skip prefix ip_family :cl_update_statd(0)[+25] IFS=~ :cl_update_statd(0)[+25] [[ public != public ]] :cl_update_statd(0)[+25] [[ boot != boot ]] :cl_update_statd(0)[+33] : Find the state of this candidate :cl_update_statd(0)[+33] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+37] :cl_update_statd(0)[+37] print 61.81.244.123 :cl_update_statd(0)[+37] tr ./ xx addr=i61x81x244x123_epprds :cl_update_statd(0)[+43] eval candidate_state=${i61x81x244x123_epprds:-down} :cl_update_statd(0)[+43] candidate_state=UP :cl_update_statd(0)[+46] : If state is UP, check to see if this node can talk to it :cl_update_statd(0)[+46] [[ UP == UP ]] :cl_update_statd(0)[+50] ping -w 5 -c 1 -q 61.81.244.123 :cl_update_statd(0)[+50] 1> /dev/null :cl_update_statd(0)[+61] echo epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+61] tr \n :cl_update_statd(0)[+62] read lcl_adapt lcl_type lcl_network lcl_net_type lcl_attrib lcl_node lcl_ip_addr skip lcl_interface skip lcl_netmask skip skip lcl_prefix lcl_ip_family :cl_update_statd(0)[+62] IFS=~ :cl_update_statd(0)[+62] [[ net_ether_01 
!= net_ether_01 ]] :cl_update_statd(0)[+62] [[ boot != boot ]] :cl_update_statd(0)[+62] [[ public != public ]] :cl_update_statd(0)[+62] [[ AF_INET != AF_INET ]] :cl_update_statd(0)[+62] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+71] :cl_update_statd(0)[+71] print 61.81.244.134 :cl_update_statd(0)[+71] tr ./ xx addr=i61x81x244x134_epprda :cl_update_statd(0)[+77] eval lcl_candidate_state=${i61x81x244x134_epprda:-down} :cl_update_statd(0)[+77] lcl_candidate_state=UP :cl_update_statd(0)[+77] [[ UP == UP ]] :cl_update_statd(0)[+81] : epprds is on the same network as an interface that is up :cl_update_statd(0)[+82] : on the local node, and the attributes match. :cl_update_statd(0)[+84] NewTwin=epprds :cl_update_statd(0)[+85] break :cl_update_statd(0)[+85] [[ -n epprds ]] :cl_update_statd(0)[+91] break :cl_update_statd(0)[+91] [[ -z epprds ]] :cl_update_statd(0)[+100] echo epprds :cl_update_statd(0)[+101] return 0 NEWTWIN=epprds :cl_update_statd(0)[+227] [[ -z epprds ]] :cl_update_statd(0)[+227] [[ epprds != epprds ]] :cl_update_statd(0)[+259] : RC is actually 0 :cl_update_statd(0)[+266] return 0 :network_up_complete[+115] [ 0 -ne 0 ] :network_up_complete[+120] break :network_up_complete[+125] [[ epprda == epprda ]] :network_up_complete[+131] :network_up_complete[+131] odmget -qname=net_ether_01 HACMPnetwork :network_up_complete[+131] awk $1 == "alias" {print $3} :network_up_complete[+131] sed s/"//g ALIASING=1 :network_up_complete[+131] [[ 1 == 1 ]] :network_up_complete[+133] cl_configure_persistent_address aliasing_network_up -n net_ether_01 :cl_configure_persistent_address[1344] version=1.56.1.4 :cl_configure_persistent_address[1346] cl_get_path -S :cl_configure_persistent_address[1346] OP_SEP='~' :cl_configure_persistent_address[1349] get_local_nodename :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. 
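The twin-interface selection in cl_update_statd above relies on adapter-state variables exported by the cluster manager: the candidate's IP address is mangled into a variable name (dots become 'x', node name appended) and eval'ed with a default of down. A sketch of that lookup, with the address/node pair from this log:

    # Sketch of the adapter-state lookup used by cl_update_statd above.
    ip=61.81.244.123
    node=epprds
    var=i$(print $ip | tr ./ xx)_$node    # -> i61x81x244x123_epprds
    eval candidate_state=\${$var:-down}   # state variable set by the cluster manager
    [[ $candidate_state == UP ]] && ping -w 5 -c 1 -q $ip > /dev/null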
:get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :cl_configure_persistent_address[1349] LOCALNODENAME=epprda :cl_configure_persistent_address[1354] [[ -z epprda ]] :cl_configure_persistent_address[1356] NETWORK='' :cl_configure_persistent_address[1357] ALIVE_IF='' :cl_configure_persistent_address[1358] FAILED_IF='' :cl_configure_persistent_address[1359] FAILED_ADDRESS='' :cl_configure_persistent_address[1360] UPDATE_CLSTRMGR=1 :cl_configure_persistent_address[1361] CHECK_HA_ALIVE=1 :cl_configure_persistent_address[1362] RESTORE_ROUTES=/usr/es/sbin/cluster/.pers_restore_routes :cl_configure_persistent_address[1363] RC=0 :cl_configure_persistent_address[1364] B_FLAG=0 :cl_configure_persistent_address[1366] ACTION=aliasing_network_up :cl_configure_persistent_address[1367] shift :cl_configure_persistent_address[1369] getopt n:a:f:i:dPB -n net_ether_01 :cl_configure_persistent_address[1369] set -- -n net_ether_01 -- :cl_configure_persistent_address[1371] (( 0 != 0 )) :cl_configure_persistent_address[1371] [[ -z aliasing_network_up ]] :cl_configure_persistent_address[1376] [[ -n != -- ]] :cl_configure_persistent_address[1379] NETWORK=net_ether_01 :cl_configure_persistent_address[1380] shift :cl_configure_persistent_address[1380] shift :cl_configure_persistent_address[1376] [[ -- != -- ]] :cl_configure_persistent_address[1418] shift :cl_configure_persistent_address[1422] [[ aliasing_network_up == up ]] :cl_configure_persistent_address[1520] [[ aliasing_network_up == swap ]] :cl_configure_persistent_address[1667] [[ aliasing_network_up == fail_boot ]] :cl_configure_persistent_address[1830] [[ aliasing_network_up == aliasing_network_up ]] :cl_configure_persistent_address[1831] [[ -z net_ether_01 ]] :cl_configure_persistent_address[1837] isAliasingNetwork net_ether_01 :cl_configure_persistent_address[isAliasingNetwork:386] PS4_FUNC=isAliasingNetwork :cl_configure_persistent_address[isAliasingNetwork:386] typeset PS4_FUNC :cl_configure_persistent_address[isAliasingNetwork:387] [[ high == high ]] :cl_configure_persistent_address[isAliasingNetwork:387] set -x :cl_configure_persistent_address[isAliasingNetwork:389] NETWORK=net_ether_01 :cl_configure_persistent_address[isAliasingNetwork:391] odmget -qname=net_ether_01 HACMPnetwork :cl_configure_persistent_address[isAliasingNetwork:392] awk '$1 == "alias" {print $3}' :cl_configure_persistent_address[isAliasingNetwork:393] sed 's/"//g' :cl_configure_persistent_address[isAliasingNetwork:391] print 1 :cl_configure_persistent_address[1837] [[ 1 != 1 ]] :cl_configure_persistent_address[1842] cllsif -J '~' -Spi epprda :cl_configure_persistent_address[1842] awk -F~ '$2 == "persistent" && $3 == "net_ether_01" {print $1}' :cl_configure_persistent_address[1842] PERSISTENT='' :cl_configure_persistent_address[1844] [[ -z '' ]] :cl_configure_persistent_address[1846] exit 0 :network_up_complete[+141] :network_up_complete[+141] cl_rrmethods2call net_initialization :cl_rrmethods2call[56] version=%I% :cl_rrmethods2call[84] RRMETHODS='' :cl_rrmethods2call[85] NEED_RR_ENV_VARS=no :cl_rrmethods2call[90] : The network methods are returned if the Network type is XD_data. 
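Whether a network does IP aliasing is read straight from the HACMPnetwork ODM class, as the isAliasingNetwork calls above show. An equivalent standalone query (network name from this log):

    # Sketch of the isAliasingNetwork test traced above.
    NETWORK=net_ether_01
    ALIASING=$(odmget -qname=$NETWORK HACMPnetwork |
               awk '$1 == "alias" {print $3}' | sed 's/"//g')
    [[ $ALIASING == 1 ]] && print "$NETWORK uses IP aliasing"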
:cl_rrmethods2call[92] clodmget -qname=net_ether_01 -f nimname -n HACMPnetwork :cl_rrmethods2call[92] RRNET=ether :cl_rrmethods2call[94] [[ ether == XD_data ]] :cl_rrmethods2call[98] return 0 METHODS= :network_up_complete[+163] :network_up_complete[+163] clodmget -n -q name=MOUNT_FILESYSTEM -f group HACMPresource CROSSMOUNTS=epprd_rg :network_up_complete[+165] [ -n epprd_rg -a epprda = epprda ] :network_up_complete[+168] : Remount any NFS cross mount if required :network_up_complete[+174] :network_up_complete[+174] clodmget -n -f group HACMPgroup RESOURCE_GROUPS=epprd_rg :network_up_complete[+185] :network_up_complete[+185] clodmget -n -q name=MOUNT_FILESYSTEM and group=epprd_rg -f value HACMPresource MOUNT_FILESYSTEM=/board;/board_org :network_up_complete[+185] [[ -z /board;/board_org ]] :network_up_complete[+189] IN_RG=false :network_up_complete[+189] clodmget -n -q group=epprd_rg -f nodes HACMPgroup :network_up_complete[+189] [[ epprda == epprda ]] :network_up_complete[+192] IN_RG=true :network_up_complete[+192] [[ epprds == epprda ]] :network_up_complete[+192] [[ true == false ]] :network_up_complete[+197] :network_up_complete[+197] clRGinfo -s epprd_rg clRGinfo[431]: version I clRGinfo[517]: Number of resource groups = 1 clRGinfo[562]: cluster epprda_cluster is version = 22 clRGinfo[1439]: IPC target host name is 'localhost' clRGinfo[685]: Current group is 'epprd_rg' get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_for_short_output_table get primary state info for state 4 get secondary state info for state 4 getPreviousStateString: Primary=4, Sec=-1 get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_for_short_output_table get primary state info for state 4 get secondary state info for state 4 getPreviousStateString: Primary=4, Sec=-1 :network_up_complete[+197] awk -F : { if ( $2 == "ONLINE" ) print $3 } NFS_HOST= :network_up_complete[+197] [[ -z ]] :network_up_complete[+198] continue :network_up_complete[+257] [[ epprda == epprda ]] :network_up_complete[+257] [[ 0 -ne 0 ]] :network_up_complete[+262] amlog_trace 1591|epprda|net_ether_01 :network_up_complete[+61] clcycle clavailability.log :network_up_complete[+61] 1> /dev/null 2>& 1 :network_up_complete[+61] :network_up_complete[+61] cltime DATE=2023-01-28T18:05:43.494681 :network_up_complete[+61] echo |2023-01-28T18:05:43.494681|INFO: 1591|epprda|net_ether_01 :network_up_complete[+61] 1>> /var/hacmp/availability/clavailability.log :network_up_complete[+265] exit 0 Jan 28 2023 18:05:43 EVENT COMPLETED: network_up_complete epprda net_ether_01 0 |2023-01-28T18:05:43|1591|EVENT COMPLETED: network_up_complete epprda net_ether_01 0| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 1590 No resource state change initiated by the cluster manager as a result of this event ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_FAIL_ADAPTER|2023-01-28T18:05:45|1590| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Jan 28 2023 18:05:45 EVENT START: fail_interface epprda 61.81.244.134 |2023-01-28T18:05:45|1590|EVENT START: fail_interface epprda 61.81.244.134| :fail_interface[+64] version=%I% :fail_interface[+66] :fail_interface[+66] cl_get_path -S OP_SEP=~ :fail_interface[+68] [ 2 -ne 2 ] :fail_interface[+74] NODENAME=epprda :fail_interface[+75] 
ADDR=61.81.244.134
:fail_interface[+76] PREFIX_LEN=
:fail_interface[+77] ADDR_FAMILY=
:fail_interface[+79] set -u
:fail_interface[+81]
:fail_interface[+81] dspmsg scripts.cat 8062 Interface 61.81.244.134 has failed on node epprda.\n 61.81.244.134 epprda
MSG=Interface 61.81.244.134 has failed on node epprda.
:fail_interface[+82] echo Interface 61.81.244.134 has failed on node epprda.
:fail_interface[+82] 1> /dev/console
:fail_interface[+84] [[ epprda = epprda ]]
:fail_interface[+88]
:fail_interface[+88] cllsif -J ~ -Sn 61.81.244.134
:fail_interface[+88] cut -d~ -f3
NETWORK=net_ether_01
:fail_interface[+91]
:fail_interface[+91] odmget -qname=net_ether_01 HACMPnetwork
:fail_interface[+91] awk $1 == "alias" {print $3}
:fail_interface[+91] sed s/"//g
ALIASING=1
:fail_interface[+91] [[ 1 = 1 ]]
:fail_interface[+96] set +u
:fail_interface[+97] saveNSORDER=UNDEFINED
:fail_interface[+98] set -u
:fail_interface[+99] NSORDER=local
:fail_interface[+99] export NSORDER
:fail_interface[+100] netstat -in
Name  Mtu   Network     Address            Ipkts     Ierrs Opkts     Oerrs Coll
en0   1500  link#2      fa.e6.13.4e.a9.20  183746538 0     60760789  0     0
en0   1500  61.81.244   61.81.244.134      183746538 0     60760789  0     0
lo0   16896 link#1                         34272118  0     34272118  0     0
lo0   16896 127         127.0.0.1          34272118  0     34272118  0     0
lo0   16896 ::1%1                          34272118  0     34272118  0     0
:fail_interface[+101] netstat -rnC
Routing tables
Destination      Gateway        Flags  Wt  Policy  If   Cost  Config_Cost

Route tree for Protocol Family 2 (Internet):
default          61.81.244.1    UG     1   -       en0  0     0
61.81.244.0      61.81.244.134  UHSb   1   -       en0  0     0  =>
61.81.244/24     61.81.244.134  U      1   -       en0  0     0
61.81.244.134    127.0.0.1      UGHS   1   -       lo0  0     0
61.81.244.255    61.81.244.134  UHSb   1   -       en0  0     0
127/8            127.0.0.1      U      1   -       lo0  0     0

Route tree for Protocol Family 24 (Internet v6):
::1%1            ::1%1          UH     1   -       lo0  0     0
:fail_interface[+102] cl_configure_persistent_address fail_boot -i 61.81.244.134 -n net_ether_01
:cl_configure_persistent_address[1344] version=1.56.1.4
:cl_configure_persistent_address[1346] cl_get_path -S
:cl_configure_persistent_address[1346] OP_SEP='~'
:cl_configure_persistent_address[1349] get_local_nodename
:get_local_nodename[48] version=1.2.1.28
:get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster
:get_local_nodename[54] ODMDIR=/etc/es/objrepos
:get_local_nodename[54] export ODMDIR
:get_local_nodename[55] nodename=''
:get_local_nodename[55] typeset nodename
:get_local_nodename[56] cllsclstr -N
:get_local_nodename[56] nodename=epprda
:get_local_nodename[57] rc=0
:get_local_nodename[57] typeset -i rc
:get_local_nodename[58] (( 0 != 0 ))
:get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done.
:get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :cl_configure_persistent_address[1349] LOCALNODENAME=epprda :cl_configure_persistent_address[1354] [[ -z epprda ]] :cl_configure_persistent_address[1356] NETWORK='' :cl_configure_persistent_address[1357] ALIVE_IF='' :cl_configure_persistent_address[1358] FAILED_IF='' :cl_configure_persistent_address[1359] FAILED_ADDRESS='' :cl_configure_persistent_address[1360] UPDATE_CLSTRMGR=1 :cl_configure_persistent_address[1361] CHECK_HA_ALIVE=1 :cl_configure_persistent_address[1362] RESTORE_ROUTES=/usr/es/sbin/cluster/.pers_restore_routes :cl_configure_persistent_address[1363] RC=0 :cl_configure_persistent_address[1364] B_FLAG=0 :cl_configure_persistent_address[1366] ACTION=fail_boot :cl_configure_persistent_address[1367] shift :cl_configure_persistent_address[1369] getopt n:a:f:i:dPB -i 61.81.244.134 -n net_ether_01 :cl_configure_persistent_address[1369] set -- -i 61.81.244.134 -n net_ether_01 -- :cl_configure_persistent_address[1371] (( 0 != 0 )) :cl_configure_persistent_address[1371] [[ -z fail_boot ]] :cl_configure_persistent_address[1376] [[ -i != -- ]] :cl_configure_persistent_address[1392] FAILED_ADDRESS=61.81.244.134 :cl_configure_persistent_address[1393] shift :cl_configure_persistent_address[1393] shift :cl_configure_persistent_address[1376] [[ -n != -- ]] :cl_configure_persistent_address[1379] NETWORK=net_ether_01 :cl_configure_persistent_address[1380] shift :cl_configure_persistent_address[1380] shift :cl_configure_persistent_address[1376] [[ -- != -- ]] :cl_configure_persistent_address[1418] shift :cl_configure_persistent_address[1422] [[ fail_boot == up ]] :cl_configure_persistent_address[1520] [[ fail_boot == swap ]] :cl_configure_persistent_address[1667] [[ fail_boot == fail_boot ]] :cl_configure_persistent_address[1668] [[ -z 61.81.244.134 ]] :cl_configure_persistent_address[1668] [[ -z net_ether_01 ]] :cl_configure_persistent_address[1672] clgetif -a 61.81.244.134 :cl_configure_persistent_address[1672] 2> /dev/null :cl_configure_persistent_address[1672] awk '{print $1}' :cl_configure_persistent_address[1672] IF=en0 :cl_configure_persistent_address[1673] cllsif -J '~' -Sn 61.81.244.134 :cl_configure_persistent_address[1673] cut -d~ -f3 :cl_configure_persistent_address[1673] NETWORK=net_ether_01 :cl_configure_persistent_address[1677] isAliasingNetwork net_ether_01 :cl_configure_persistent_address[isAliasingNetwork:386] PS4_FUNC=isAliasingNetwork :cl_configure_persistent_address[isAliasingNetwork:386] typeset PS4_FUNC :cl_configure_persistent_address[isAliasingNetwork:387] [[ high == high ]] :cl_configure_persistent_address[isAliasingNetwork:387] set -x :cl_configure_persistent_address[isAliasingNetwork:389] NETWORK=net_ether_01 :cl_configure_persistent_address[isAliasingNetwork:391] odmget -qname=net_ether_01 HACMPnetwork :cl_configure_persistent_address[isAliasingNetwork:392] awk '$1 == "alias" {print $3}' :cl_configure_persistent_address[isAliasingNetwork:393] sed 's/"//g' :cl_configure_persistent_address[isAliasingNetwork:391] print 1 :cl_configure_persistent_address[1677] [[ 1 != 1 ]] :cl_configure_persistent_address[1682] cllsif -J '~' -Spi epprda :cl_configure_persistent_address[1682] awk -F~ '$2 == "persistent" && $3 == "net_ether_01" {print $1}' :cl_configure_persistent_address[1682] PERSISTENT='' :cl_configure_persistent_address[1684] [[ -z '' ]] :cl_configure_persistent_address[1686] exit 0 
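The fail_boot path above first maps the failed address back to an interface and a network, then looks for a persistent address on that network before doing any real work; here none is defined, so it exits immediately. A sketch of those lookups with the values from this log:

    # Sketch of the fail_boot lookups traced above.
    ADDR=61.81.244.134
    IF=$(clgetif -a $ADDR 2> /dev/null | awk '{print $1}')   # -> en0
    NETWORK=$(cllsif -J '~' -Sn $ADDR | cut -d~ -f3)         # -> net_ether_01
    PERSISTENT=$(cllsif -J '~' -Spi epprda |
        awk -F~ -v net=$NETWORK '$2 == "persistent" && $3 == net {print $1}')
    [[ -z $PERSISTENT ]] && exit 0   # nothing to move, as happens here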
:fail_interface[+106] :fail_interface[+106] clgetif -n 61.81.244.134 :fail_interface[+106] LC_ALL=C NETMASK=255.255.255.0 :fail_interface[+107] :fail_interface[+107] clgetif -a 61.81.244.134 :fail_interface[+107] LC_ALL=C IF1=en0 :fail_interface[+108] BOOT1=61.81.244.134 :fail_interface[+111] :fail_interface[+111] cllsif -J ~ -Si epprda :fail_interface[+111] awk -F~ -v net=net_ether_01 -v if1=en0 ($2=="boot" && \ $3==net && $9!=if1) {printf("%s\n",$7)} BOOT2= :fail_interface[+111] [[ -n ]] :fail_interface[+111] [[ UNDEFINED != UNDEFINED ]] :fail_interface[+179] export NSORDER= :fail_interface[+184] exit 0 Jan 28 2023 18:05:45 EVENT COMPLETED: fail_interface epprda 61.81.244.134 0 |2023-01-28T18:05:45|1590|EVENT COMPLETED: fail_interface epprda 61.81.244.134 0| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 1593 No resource state change initiated by the cluster manager as a result of this event ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_JOIN_ADAPTER|2023-01-28T18:05:47|1593| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Jan 28 2023 18:05:48 EVENT START: join_interface epprda 61.81.244.134 |2023-01-28T18:05:48|1593|EVENT START: join_interface epprda 61.81.244.134| :join_interface[+64] version=%I% :join_interface[+67] :join_interface[+67] cl_get_path -S OP_SEP=~ :join_interface[+69] [ 2 -ne 2 ] :join_interface[+75] NODENAME=epprda :join_interface[+76] ADDR=61.81.244.134 :join_interface[+77] BOOTIF= :join_interface[+78] ADDR_FAMILY= :join_interface[+79] PREFIX_LEN= :join_interface[+81] set -u :join_interface[+82] [[ epprda = epprda ]] :join_interface[+88] :join_interface[+88] cllsif -J ~ -Si epprda :join_interface[+88] awk -F~ -v bootif=61.81.244.134 ($2=="boot" && $7==bootif) \ {printf("%s\n",$9)} BOOTIF=en0 :join_interface[+88] [[ -n en0 ]] :join_interface[+91] cllsif -J ~ -Sn 61.81.244.134 :join_interface[+91] cut -d~ -f12 :join_interface[+92] tr ~ :join_interface[+92] read IF_ALIAS :join_interface[+92] [[ -n ]] :join_interface[+121] :join_interface[+121] dspmsg scripts.cat 8064 Interface 61.81.244.134 is now available on node epprda.\n 61.81.244.134 epprda MSG=Interface 61.81.244.134 is now available on node epprda. :join_interface[+122] echo Interface 61.81.244.134 is now available on node epprda. 
:join_interface[+122] 1> /dev/console :join_interface[+124] exit 0 Jan 28 2023 18:05:48 EVENT COMPLETED: join_interface epprda 61.81.244.134 0 |2023-01-28T18:05:48|1593|EVENT COMPLETED: join_interface epprda 61.81.244.134 0| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 1592 No resource state change initiated by the cluster manager as a result of this event ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_JOIN_ADAPTER|2023-01-28T18:05:50|1592| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Jan 28 2023 18:05:50 EVENT START: join_interface epprda 61.81.244.156 |2023-01-28T18:05:50|1592|EVENT START: join_interface epprda 61.81.244.156| :join_interface[+64] version=%I% :join_interface[+67] :join_interface[+67] cl_get_path -S OP_SEP=~ :join_interface[+69] [ 2 -ne 2 ] :join_interface[+75] NODENAME=epprda :join_interface[+76] ADDR=61.81.244.156 :join_interface[+77] BOOTIF= :join_interface[+78] ADDR_FAMILY= :join_interface[+79] PREFIX_LEN= :join_interface[+81] set -u :join_interface[+82] [[ epprda = epprda ]] :join_interface[+88] :join_interface[+88] cllsif -J ~ -Si epprda :join_interface[+88] awk -F~ -v bootif=61.81.244.156 ($2=="boot" && $7==bootif) \ {printf("%s\n",$9)} BOOTIF= :join_interface[+88] [[ -n ]] :join_interface[+121] :join_interface[+121] dspmsg scripts.cat 8064 Interface 61.81.244.156 is now available on node epprda.\n 61.81.244.156 epprda MSG=Interface 61.81.244.156 is now available on node epprda. :join_interface[+122] echo Interface 61.81.244.156 is now available on node epprda. :join_interface[+122] 1> /dev/console :join_interface[+124] exit 0 Jan 28 2023 18:05:50 EVENT COMPLETED: join_interface epprda 61.81.244.156 0 |2023-01-28T18:05:50|1592|EVENT COMPLETED: join_interface epprda 61.81.244.156 0| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 1584 No resource state change initiated by the cluster manager as a result of this event ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_FAIL_NODE_DEP_COMPLETE|2023-01-28T18:05:52|1584| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Jan 28 2023 18:05:52 EVENT START: node_down_complete epprda |2023-01-28T18:05:52|1584|EVENT START: node_down_complete epprda| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:05:52.645157 + echo '|2023-01-28T18:05:52.645157|INFO: node_down_complete|epprda' + 1>> /var/hacmp/availability/clavailability.log :node_down_complete[107] version=%I% :node_down_complete[111] : Pick up input :node_down_complete[113] NODENAME=epprda :node_down_complete[113] export NODENAME :node_down_complete[114] PARAM='' :node_down_complete[114] export PARAM :node_down_complete[116] NODE_HALT_CONTROL_FILE=/usr/es/sbin/cluster/etc/ha_nodehalt.lock :node_down_complete[125] STATUS=0 :node_down_complete[127] set -u :node_down_complete[129] (( 1 < 1 )) :node_down_complete[136] : serial number for this event is 1584 :node_down_complete[139] [[ '' == forced ]] :node_down_complete[151] : if RG_DEPENDENCIES is set to false by the cluster manager, :node_down_complete[152] : then resource groups will be processed via clsetenvgrp :node_down_complete[154] [[ '' != forced ]] :node_down_complete[154] [[ TRUE == FALSE ]] :node_down_complete[184] : For each participating resource group, serially process the resources 
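join_interface above resolves the joining address to its boot interface by scanning cllsif output; for the service address 61.81.244.156 no boot entry matches, so BOOTIF stays empty and the event just reports availability. The lookup, as a standalone sketch:

    # Sketch of the boot-interface lookup in join_interface above.
    ADDR=61.81.244.134
    BOOTIF=$(cllsif -J '~' -Si epprda |
        awk -F~ -v bootif=$ADDR '$2 == "boot" && $7 == bootif {print $9}')  # -> en0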
:node_down_complete[186] LOCALCOMP=N :node_down_complete[189] : if RG_DEPENDENCIES is set to false by the cluster manager, :node_down_complete[190] : then resource groups will be processed via clsetenvgrp :node_down_complete[192] [[ '' != forced ]] :node_down_complete[192] [[ TRUE == FALSE ]] :node_down_complete[232] [[ '' != forced ]] :node_down_complete[232] [[ epprda == epprda ]] :node_down_complete[235] : Call ss-unload replicated resource methods if they are defined :node_down_complete[237] cl_rrmethods2call ss_unload :cl_rrmethods2call[56] version=%I% :cl_rrmethods2call[84] RRMETHODS='' :cl_rrmethods2call[85] NEED_RR_ENV_VARS=no :cl_rrmethods2call[104] : The load and unload methods if defined are returned on the :cl_rrmethods2call[105] : local node :cl_rrmethods2call[107] [[ epprda == epprda ]] :cl_rrmethods2call[109] NEED_RR_ENV_VARS=yes :cl_rrmethods2call[129] : Set the '*_REP_RESOURCE' variables if needed. :cl_rrmethods2call[131] [[ yes == yes ]] :cl_rrmethods2call[133] cllsres :cl_rrmethods2call[133] 2> /dev/null :cl_rrmethods2call[133] eval APPLICATIONS='"epprd_app"' EXPORT_FILESYSTEM='"/board_org' '/sapmnt/EPP"' FILESYSTEM='""' FORCED_VARYON='"false"' FSCHECK_TOOL='"fsck"' FS_BEFORE_IPADDR='"false"' MOUNT_FILESYSTEM='"/board;/board_org"' RECOVERY_METHOD='"sequential"' SERVICE_LABEL='"epprd"' SSA_DISK_FENCING='"false"' VG_AUTO_IMPORT='"false"' VOLUME_GROUP='"datavg"' USERDEFINED_RESOURCES='""' :cl_rrmethods2call[1] APPLICATIONS=epprd_app :cl_rrmethods2call[1] EXPORT_FILESYSTEM='/board_org /sapmnt/EPP' :cl_rrmethods2call[1] FILESYSTEM='' :cl_rrmethods2call[1] FORCED_VARYON=false :cl_rrmethods2call[1] FSCHECK_TOOL=fsck :cl_rrmethods2call[1] FS_BEFORE_IPADDR=false :cl_rrmethods2call[1] MOUNT_FILESYSTEM='/board;/board_org' :cl_rrmethods2call[1] RECOVERY_METHOD=sequential :cl_rrmethods2call[1] SERVICE_LABEL=epprd :cl_rrmethods2call[1] SSA_DISK_FENCING=false :cl_rrmethods2call[1] VG_AUTO_IMPORT=false :cl_rrmethods2call[1] VOLUME_GROUP=datavg :cl_rrmethods2call[1] USERDEFINED_RESOURCES='' :cl_rrmethods2call[137] [[ -n '' ]] :cl_rrmethods2call[142] [[ -n '' ]] :cl_rrmethods2call[147] [[ -n '' ]] :cl_rrmethods2call[152] [[ -n '' ]] :cl_rrmethods2call[157] [[ -n '' ]] :cl_rrmethods2call[162] [[ -n '' ]] :cl_rrmethods2call[167] [[ -n '' ]] :cl_rrmethods2call[172] [[ -n '' ]] :cl_rrmethods2call[182] [[ -z '' ]] :cl_rrmethods2call[184] typeset sysmgdata :cl_rrmethods2call[185] typeset reposmgdata :cl_rrmethods2call[186] [[ -x /usr/es/sbin/cluster/xd_generic/xd_cli/clxd_list_mg_smit ]] :cl_rrmethods2call[191] [[ -n '' ]] :cl_rrmethods2call[191] [[ -n '' ]] :cl_rrmethods2call[197] echo '' :cl_rrmethods2call[199] return 0 :node_down_complete[237] METHODS='' :node_down_complete[251] : If dependencies are configured and node is being forced down then :node_down_complete[252] : no need to do varyoff for any passive mode VGs :node_down_complete[254] [[ TRUE == TRUE ]] :node_down_complete[257] : If any volume groups were varied on in passive mode when this node :node_down_complete[258] : came up, all the prior resource group processing would have left them :node_down_complete[259] : in passive mode. Completely vary them off at this point. 
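The passive-mode cleanup that follows in the trace finds volume groups still varied on in passive mode, lifts any read-only fence, and varies them off. A simplified sketch of what the next block of trace does (VG exclusions as in the log):

    # Simplified sketch of the passive-mode varyoff traced below.
    for vg in $(lsvg -L | grep -vwxE 'caavg_private|rootvg')
    do
        if lsvg -L $vg 2> /dev/null | grep -iq passive-only
        then
            cl_set_vg_fence_height -c $vg rw   # reset fence to read/write first
            varyoffvg $vg
            cl_update_vg_odm_ts -o -f $vg      # bring VG ODM timestamps in sync
        fi
    done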
:node_down_complete[261] lsvg -L :node_down_complete[261] lsvg -L -o :node_down_complete[261] paste -s '-d|' - :node_down_complete[261] grep -w -v -x -E 'caavg_private|rootvg' :node_down_complete[261] INACTIVE_VGS=datavg :node_down_complete[264] lsvg -L datavg :node_down_complete[264] 2> /dev/null :node_down_complete[264] grep -i -q passive-only :node_down_complete[267] : Reset any read only fence height prior to vary off :node_down_complete[269] cl_set_vg_fence_height -c datavg rw cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0)) :node_down_complete[270] RC=0 :node_down_complete[271] (( 0 != 0 )) :node_down_complete[282] : 'lsvg ' will show if a volume group is varied :node_down_complete[283] : on in passive mode. Any such are varied off :node_down_complete[285] cltime 2023-01-28T18:05:52.766951 :node_down_complete[286] varyoffvg datavg :node_down_complete[287] RC=0 :node_down_complete[288] cltime 2023-01-28T18:05:52.898538 :node_down_complete[289] : rc_varyoffvg = 0 :node_down_complete[291] : Force a timestamp update to get timestamps in sync :node_down_complete[292] : since timing may prevent LVM from doing so :node_down_complete[294] cl_update_vg_odm_ts -o -f datavg :cl_update_vg_odm_ts(0.000)[77] version=1.13 :cl_update_vg_odm_ts(0.000)[121] o_flag='' :cl_update_vg_odm_ts(0.000)[122] f_flag='' :cl_update_vg_odm_ts(0.000)[123] getopts :of option :cl_update_vg_odm_ts(0.000)[126] : Local timestamps should be good, since volume group was :cl_update_vg_odm_ts(0.001)[127] : just varyied on or off :cl_update_vg_odm_ts(0.001)[128] o_flag=TRUE :cl_update_vg_odm_ts(0.001)[123] getopts :of option :cl_update_vg_odm_ts(0.001)[131] : Update timestamps clusterwide, even if LVM support is in :cl_update_vg_odm_ts(0.001)[132] : place :cl_update_vg_odm_ts(0.001)[133] f_flag=TRUE :cl_update_vg_odm_ts(0.001)[123] getopts :of option :cl_update_vg_odm_ts(0.001)[142] shift 2 :cl_update_vg_odm_ts(0.001)[144] vg_name=datavg :cl_update_vg_odm_ts(0.001)[145] [[ -z datavg ]] :cl_update_vg_odm_ts(0.001)[151] shift :cl_update_vg_odm_ts(0.001)[152] node_list='' :cl_update_vg_odm_ts(0.001)[153] /usr/es/sbin/cluster/utilities/cl_get_path all :cl_update_vg_odm_ts(0.004)[153] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin :cl_update_vg_odm_ts(0.004)[155] [[ -z TRUE ]] :cl_update_vg_odm_ts(0.004)[214] found_new_ts='' :cl_update_vg_odm_ts(0.004)[217] : Try to update the volume group ODM time stamp on every other node :cl_update_vg_odm_ts(0.004)[218] : in the resource group that owns datavg :cl_update_vg_odm_ts(0.004)[220] [[ -z '' ]] :cl_update_vg_odm_ts(0.004)[223] : We were not given a node list. 
The node list is derived from :cl_update_vg_odm_ts(0.004)[224] : the resource group that the volume group is in. :cl_update_vg_odm_ts(0.004)[226] /usr/es/sbin/cluster/utilities/clodmget -q 'name like *VOLUME_GROUP and value = datavg' -f group -n HACMPresource :cl_update_vg_odm_ts(0.006)[226] group_name=epprd_rg :cl_update_vg_odm_ts(0.006)[227] [[ -n epprd_rg ]] :cl_update_vg_odm_ts(0.006)[230] : Find all other cluster nodes in the resource group that owns :cl_update_vg_odm_ts(0.006)[231] : the volume group datavg :cl_update_vg_odm_ts(0.007)[233] /usr/es/sbin/cluster/utilities/clodmget -q 'group = epprd_rg' -f nodes -n HACMPgroup :cl_update_vg_odm_ts(0.009)[233] node_list='epprda epprds' :cl_update_vg_odm_ts(0.009)[238] : Check to see if the volume group is known locally :cl_update_vg_odm_ts(0.009)[240] odmget -q 'name = datavg and PdDvLn = logical_volume/vgsubclass/vgtype' CuDv :cl_update_vg_odm_ts(0.011)[240] [[ -z $'\nCuDv:\n\tname = "datavg"\n\tstatus = 1\n\tchgstatus = 1\n\tddins = ""\n\tlocation = ""\n\tparent = ""\n\tconnwhere = ""\n\tPdDvLn = "logical_volume/vgsubclass/vgtype"' ]] :cl_update_vg_odm_ts(0.011)[272] : Get the vgid for volume group datavg :cl_update_vg_odm_ts(0.011)[274] getlvodm -v datavg :cl_update_vg_odm_ts(0.014)[274] vgid=00c44af100004b00000001851e9dc053 :cl_update_vg_odm_ts(0.014)[280] : Get the volume group timestamp for datavg :cl_update_vg_odm_ts(0.014)[281] : as currently saved in ODM :cl_update_vg_odm_ts(0.014)[283] getlvodm -T 00c44af100004b00000001851e9dc053 :cl_update_vg_odm_ts(0.016)[283] current_odm_ts=63d4e56032aa2e89 :cl_update_vg_odm_ts(0.016)[288] [[ TRUE != TRUE ]] :cl_update_vg_odm_ts(0.017)[346] : Is an update 'necessary?' :cl_update_vg_odm_ts(0.017)[348] [[ -n 'epprda epprds' ]] :cl_update_vg_odm_ts(0.017)[350] LOCALNODENAME=epprda :cl_update_vg_odm_ts(0.017)[351] LOCALNODENAME=epprda :cl_update_vg_odm_ts(0.017)[352] [[ -n epprda ]] :cl_update_vg_odm_ts(0.017)[355] : Skip the local node, since we have done that above. :cl_update_vg_odm_ts(0.018)[357] print 'epprda epprds' :cl_update_vg_odm_ts(0.020)[357] tr ' ' '\n' :cl_update_vg_odm_ts(0.021)[357] tr , '\n' :cl_update_vg_odm_ts(0.023)[357] grep -v -w -x epprda :cl_update_vg_odm_ts(0.024)[357] paste -s -d, - :cl_update_vg_odm_ts(0.026)[357] node_list=epprds :cl_update_vg_odm_ts(0.027)[365] : Update the time stamp on all those other nodes on which the :cl_update_vg_odm_ts(0.027)[366] : volume group is currently varied off. LVM will take care of :cl_update_vg_odm_ts(0.027)[367] : the others. 
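The queries traced above derive the owning resource group and its peer node list from the HACMP ODM. A condensed sketch in the same ksh idiom; datavg, epprd_rg and epprda are the values from this log:
# Sketch: find the owning resource group and peer nodes for a volume group.
VG=datavg
group=$(clodmget -q "name like *VOLUME_GROUP and value = $VG" -f group -n HACMPresource)
nodes=$(clodmget -q "group = $group" -f nodes -n HACMPgroup)   # e.g. 'epprda epprds'
vgid=$(getlvodm -v $VG)               # VGID is what the low-level LVM commands use
current_odm_ts=$(getlvodm -T $vgid)   # timestamp currently saved in the local ODM
# Skip the local node, since its ODM was already updated:
peers=$(print $nodes | tr ' ' '\n' | grep -v -w -x epprda | paste -s -d, -)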
:cl_update_vg_odm_ts(0.027)[369] [[ -n epprds ]] :cl_update_vg_odm_ts(0.027)[371] cl_on_node -cspoc '-f -n epprds' 'lsvg -o | grep -qx datavg || /usr/sbin/putlvodm -T 63d4e56032aa2e89 00c44af100004b00000001851e9dc053 && /usr/sbin/savebase > /dev/null' :cl_update_vg_odm_ts(0.027)[371] _CSPOC_CALLED_FROM_SMIT=true clhaver[576]: version 1.14 clhaver[591]: colon delimied output clhaver[612]: MINVER=6100 clhaver[624]: thread(epprds) clhaver[144]: cl_gethostbynode epprds cl_gethostbynode[102]: version 1.1 i_flag=0 given name is epprds cl_gethostbynode[127]: cl_query nodes=2 cl_gethostbynode[161]: epprds is a PowerHA node name cl_gethostbynode[313]: epprds is the CAA host matching PowerHA node epprds clhaver[157]: node epprds resolves to epprds clhaver[166]: cl_socket(COLLVER epprds epprds) clhaver[191]: cl_connect(epprds) clhaver[230]: read(epprds) epprds: :cl_rsh[99] version=1.4 epprds: :cl_rsh[102] CAA_node_name='' epprds: :cl_rsh[105] : Process optional flags epprds: :cl_rsh[107] cmd_flag=-n epprds: :cl_rsh[108] [[ -n == -n ]] epprds: :cl_rsh[111] : Remove the no standard input flag epprds: :cl_rsh[113] shift epprds: :cl_rsh[124] : Pick up and check the input epprds: :cl_rsh[126] print 'epprds /usr/es/sbin/cluster/cspoc/cexec eval gmhdhgghcacngpcahmcaghhcgfhacacnhbhicagegbhegbhgghcahmhmcacphfhdhccphdgcgjgocphahfhegmhggpgegncacnfecadgddgedegfdfdgdadddcgbgbdcgfdidjcadadagddedegbggdbdadadadadegcdadadadadadadadbdidfdbgfdjgegddadfddcacgcgcacphfhdhccphdgcgjgocphdgbhggfgcgbhdgfcadocacpgegfhgcpgohfgmgm' epprds: :cl_rsh[126] read destination command epprds: :cl_rsh[127] [[ -z epprds ]] epprds: :cl_rsh[127] [[ -z '/usr/es/sbin/cluster/cspoc/cexec eval gmhdhgghcacngpcahmcaghhcgfhacacnhbhicagegbhegbhgghcahmhmcacphfhdhccphdgcgjgocphahfhegmhggpgegncacnfecadgddgedegfdfdgdadddcgbgbdcgfdidjcadadagddedegbggdbdadadadadegcdadadadadadadadbdidfdbgfdjgegddadfddcacgcgcacphfhdhccphdgcgjgocphdgbhggfgcgbhdgfcadocacpgegfhgcpgohfgmgm' ]] epprds: :cl_rsh[136] /usr/es/sbin/cluster/utilities/cl_nn2hn epprds epprds: :cl_nn2hn[83] version=1.11 epprds: :cl_nn2hn[86] CAA_host_name='' epprds: :cl_nn2hn[86] typeset CAA_host_name epprds: :cl_nn2hn[87] node_name='' epprds: :cl_nn2hn[87] typeset node_name epprds: :cl_nn2hn[88] node_interfaces='' epprds: :cl_nn2hn[88] typeset node_interfaces epprds: :cl_nn2hn[89] COMM_PATH='' epprds: :cl_nn2hn[89] typeset COMM_PATH epprds: :cl_nn2hn[90] r_flag='' epprds: :cl_nn2hn[90] typeset r_flag epprds: :cl_nn2hn[93] : Pick up and check the input epprds: :cl_nn2hn[95] getopts r option epprds: :cl_nn2hn[106] : Pick up the destination, which follows the options epprds: :cl_nn2hn[108] shift 0 epprds: :cl_nn2hn[109] destination=epprds epprds: :cl_nn2hn[109] typeset destination epprds: :cl_nn2hn[111] [[ -z epprds ]] epprds: :cl_nn2hn[121] : In order to prevent recursion, first you must prevent recursion... epprds: :cl_nn2hn[123] [[ '' != TRUE ]] epprds: :cl_nn2hn[126] : This routine is not being called from cl_query_hn_id, so call it epprds: :cl_nn2hn[127] : to see if it can find the CAA host name based on a common short epprds: :cl_nn2hn[128] : id, or match on CAA host name, or match on CAA short name, or epprds: :cl_nn2hn[129] : similar match in /etc/cluster/rhosts. 
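The command pushed to the peer is legible in the cl_on_node call above; ksh's left-associative '||'/'&&' make it read as '(A || B) && C'. Shown verbatim from this trace (the timestamp and VGID are the values logged above):
# Runs on epprds: if datavg is varied on there, LVM keeps its timestamp current
# itself; otherwise putlvodm stamps the ODM copy. savebase then persists the ODM
# whenever either branch succeeded.
lsvg -o | grep -qx datavg || /usr/sbin/putlvodm -T 63d4e56032aa2e89 00c44af100004b00000001851e9dc053 && /usr/sbin/savebase > /dev/null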
epprds: :cl_nn2hn[131] cl_query_hn_id -q -i epprds epprds: cl_query_hn_id[137]: version 1.2 epprds: cl_gethostbynode[102]: version 1.1 i_flag=105 given name is epprds epprds: cl_gethostbynode[127]: cl_query nodes=2 epprds: cl_gethostbynode[161]: epprds is a PowerHA node name epprds: cl_gethostbynode[313]: epprds is the CAA host matching PowerHA node epprds epprds: :cl_nn2hn[131] CAA_host_name=epprds epprds: :cl_nn2hn[132] RC=0 epprds: :cl_nn2hn[133] (( 0 == 0 )) epprds: :cl_nn2hn[136] : The straight forward tests worked! epprds: :cl_nn2hn[138] [[ epprds == @(+([0-9.])|+([0-9:])) ]] epprds: :cl_nn2hn[159] [[ -z epprds ]] epprds: :cl_nn2hn[340] [[ -z epprds ]] epprds: :cl_nn2hn[345] [[ -n epprds ]] epprds: :cl_nn2hn[348] : We have found epprds is our best guess at a CAA host name epprds: :cl_nn2hn[349] : corresponding to epprds epprds: :cl_nn2hn[351] print epprds epprds: :cl_nn2hn[352] return 0 epprds: :cl_rsh[136] CAA_node_name=epprds epprds: :cl_rsh[148] : Invoke clcomd epprds: :cl_rsh[150] /usr/sbin/clrsh epprds -n '/usr/es/sbin/cluster/cspoc/cexec eval gmhdhgghcacngpcahmcaghhcgfhacacnhbhicagegbhegbhgghcahmhmcacphfhdhccphdgcgjgocphahfhegmhggpgegncacnfecadgddgedegfdfdgdadddcgbgbdcgfdidjcadadagddedegbggdbdadadadadegcdadadadadadadadbdidfdbgfdjgegddadfddcacgcgcacphfhdhccphdgcgjgocphdgbhggfgcgbhdgfcadocacpgegfhgcpgohfgmgm' epprds: :cl_rsh[151] return 0 :cl_update_vg_odm_ts(0.500)[375] return 0 :node_down_complete[297] : If VG fencing is in place, restore the fence height to read/only. :node_down_complete[299] cl_set_vg_fence_height -c datavg ro cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=ro(2)) :node_down_complete[300] RC=0 :node_down_complete[301] : return code from volume group fencing is 0 :node_down_complete[302] (( 0 != 0 )) :node_down_complete[315] : remove the flag file used to indicate reconfig_resources :node_down_complete[317] rm -f /usr/es/sbin/cluster/etc/.hacmp_wlm_config_changed :node_down_complete[320] : Run WLM stop script :node_down_complete[322] cl_wlm_stop :cl_wlm_stop[+55] version=%I% :cl_wlm_stop[+59] :cl_wlm_stop[+59] clwlmruntime -l :cl_wlm_stop[+59] awk BEGIN { FS = ":" } $1 !~ /^#.*/ { print $1 } HA_WLM_CONFIG=HA_WLM_config :cl_wlm_stop[+60] [[ -z HA_WLM_config ]] :cl_wlm_stop[+69] wlmcntrl -q WLM is stopped :cl_wlm_stop[+70] WLM_IS_RUNNING=1 :cl_wlm_stop[+72] WLM_CONFIG_FILES=classes shares limits rules :cl_wlm_stop[+74] PREV_WLM_CONFIG= :cl_wlm_stop[+76] HA_STARTED_WLM=false :cl_wlm_stop[+78] [[ -e /etc/wlm/HA_WLM_config/HA_prev_config_subdir ]] :cl_wlm_stop[+86] [[ -e /etc/wlm/HA_WLM_config/classes.prev ]] :cl_wlm_stop[+86] [[ -e /etc/wlm/HA_WLM_config/shares.prev ]] :cl_wlm_stop[+86] [[ -e /etc/wlm/HA_WLM_config/limits.prev ]] :cl_wlm_stop[+86] [[ -e /etc/wlm/HA_WLM_config/rules.prev ]] :cl_wlm_stop[+107] [[ -n ]] :cl_wlm_stop[+107] [[ true = false ]] :cl_wlm_stop[+144] exit 0 :node_down_complete[330] [[ epprda == epprda ]] :node_down_complete[333] : Node is down: Create the lock file that inhibits node halt :node_down_complete[335] /bin/touch /usr/es/sbin/cluster/etc/ha_nodehalt.lock :node_down_complete[339] : If this is the last node to leave, restore read 
write access to all volume groups :node_down_complete[341] [[ '' != forced ]] :node_down_complete[343] [[ -z epprds ]] :node_down_complete[392] [[ epprda == epprda ]] :node_down_complete[395] : Node is gracefully going down. :node_down_complete[397] clodmget -n -q policy=scsi -f value HACMPsplitmerge :node_down_complete[397] SCSIPR_ENABLED='' :node_down_complete[397] typeset SCSIPR_ENABLED :node_down_complete[398] [[ '' == Yes ]] :node_down_complete[452] : refresh clcomd, FWIW :node_down_complete[454] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. :node_down_complete[459] : This is the final info of all RGs: :node_down_complete[461] clRGinfo -p -t :node_down_complete[461] 2>& 1 clRGinfo[431]: version I clRGinfo[517]: Number of resource groups = 0 clRGinfo[562]: cluster epprda_cluster is version = 22 clRGinfo[597]: no resource groups specified on command line - print all clRGinfo[685]: Current group is 'epprd_rg' get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_table get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_table Cluster Name: epprda_cluster Resource Group Name: epprd_rg Node Group State Delayed Timers ---------------------------------------------------------------- --------------- ------------------- epprda OFFLINE epprds OFFLINE :node_down_complete[463] return 0 Jan 28 2023 18:05:53 EVENT COMPLETED: node_down_complete epprda 0 |2023-01-28T18:05:53|1584|EVENT COMPLETED: node_down_complete epprda 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:05:53.540739 + echo '|2023-01-28T18:05:53.540739|INFO: node_down_complete|epprda|0' + 1>> /var/hacmp/availability/clavailability.log clexit.rc : Normal termination of clstrmgrES. Restart now. 0513-059 The clstrmgrES Subsystem has been started. Subsystem PID is 26673452. Jan 28 2023 18:06:40 EVENT START: admin_op clrm_start_request 12509 0 |2023-01-28T18:06:40|12509|EVENT START: admin_op clrm_start_request 12509 0| :admin_op[110] trap sigint_handler INT :admin_op[116] OP_TYPE=clrm_start_request :admin_op[116] typeset OP_TYPE :admin_op[117] SERIAL=12509 :admin_op[117] typeset -li SERIAL :admin_op[118] INVALID=0 :admin_op[118] typeset -li INVALID The administrator initiated the following action at Sat Jan 28 18:06:40 KORST 2023 Check smit.log and clutils.log for additional details. Starting PowerHA cluster services on node: epprda in normal mode... Jan 28 2023 18:06:43 EVENT COMPLETED: admin_op clrm_start_request 12509 0 0 |2023-01-28T18:06:43|12509|EVENT COMPLETED: admin_op clrm_start_request 12509 0 0| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 12510 Cluster services started on node 'epprda' Enqueued rg_move acquire event for resource group epprd_rg. Node Up Completion Event has been enqueued. 
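Note the bracketing pair of halt-lock operations in this log: node_down_complete created the lock above once the node was safely down, and node_up removes it again just below, so that a clstrmgrES failure outside that window halts the node. In effect:
# While the node is down (from node_down_complete above):
/bin/touch /usr/es/sbin/cluster/etc/ha_nodehalt.lock   # inhibit node halt
# As the node comes back up (from node_up below):
rm -f /usr/es/sbin/cluster/etc/ha_nodehalt.lock        # clstrmgr failure again leads to node halt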
---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_JOIN_NODE_DEP|2023-01-28T18:06:45|12510| |CLUSTER_RG_MOVE_ACQUIRE|epprd_rg| |NODE_UP_COMPLETE| |EVENT_PREAMBLE_END| Jan 28 2023 18:06:47 EVENT START: node_up epprda |2023-01-28T18:06:47|12510|EVENT START: node_up epprda| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:06:47.807626 + echo '|2023-01-28T18:06:47.807626|INFO: node_up|epprda' + 1>> /var/hacmp/availability/clavailability.log :node_up[182] version=%I% :node_up[185] NODENAME=epprda :node_up[185] export NODENAME :node_up[193] STATUS=0 :node_up[193] typeset -li STATUS :node_up[194] RC=0 :node_up[194] typeset -li RC :node_up[195] ENABLE_NFS_CROSS_MOUNT=false :node_up[196] START_MODE='' :node_up[196] typeset START_MODE :node_up[198] set -u :node_up[200] (( 1 < 1 )) :node_up[200] (( 1 > 2 )) :node_up[207] : serial number for this event is 12510 :node_up[210] [[ epprda == epprda ]] :node_up[213] : Remove the node halt lock file. :node_up[214] : Hereafter, clstrmgr failure leads to node halt :node_up[216] rm -f /usr/es/sbin/cluster/etc/ha_nodehalt.lock :node_up[219] (( 1 > 1 )) :node_up[256] : If RG_DEPENDENCIES=false, process RGs with clsetenvgrp :node_up[258] [[ TRUE == FALSE ]] :node_up[281] : localnode processing prior to RG acquisition :node_up[283] [[ epprda == epprda ]] :node_up[283] [[ '' != forced ]] :node_up[286] : Reserve Volume Groups using SCSIPR :node_up[288] clodmget -n -q policy=scsi -f value HACMPsplitmerge :node_up[288] SCSIPR_ENABLED='' :node_up[288] typeset SCSIPR_ENABLED :node_up[289] [[ '' == Yes ]] :node_up[334] : Setup VG fencing. This must be done prior to any potential disk access. :node_up[336] node_up_vg_fence_init :node_up[node_up_vg_fence_init:73] typeset VGs_on_line :node_up[node_up_vg_fence_init:74] typeset VG_name :node_up[node_up_vg_fence_init:75] typeset VG_ID :node_up[node_up_vg_fence_init:76] typeset VG_PV_list :node_up[node_up_vg_fence_init:79] : Find out what volume groups are currently on-line :node_up[node_up_vg_fence_init:81] lsvg -L -o :node_up[node_up_vg_fence_init:81] 2> /var/hacmp/log/node_up.lsvg.err :node_up[node_up_vg_fence_init:81] print caavg_private rootvg :node_up[node_up_vg_fence_init:81] VGs_on_line='caavg_private rootvg' :node_up[node_up_vg_fence_init:82] [[ -e /var/hacmp/log/node_up.lsvg.err ]] :node_up[node_up_vg_fence_init:82] [[ ! -s /var/hacmp/log/node_up.lsvg.err ]] :node_up[node_up_vg_fence_init:82] rm /var/hacmp/log/node_up.lsvg.err :node_up[node_up_vg_fence_init:85] : Clean up any old fence group files and stale fence groups. 
:node_up[node_up_vg_fence_init:86] : These are all of the form '/usr/es/sbin/cluster/etc/vg/.uud' :node_up[node_up_vg_fence_init:88] valid_vg_lst='' :node_up[node_up_vg_fence_init:89] lsvg -L :node_up[node_up_vg_fence_init:89] egrep -vw 'rootvg|caavg_private' :node_up[node_up_vg_fence_init:89] 2>> /var/hacmp/log/node_up.lsvg.err :node_up:datavg[node_up_vg_fence_init:91] PS4_LOOP=datavg :node_up:datavg[node_up_vg_fence_init:92] clodmget -q $'name like \'*VOLUME_GROUP\' and value = datavg' -f value -n HACMPresource :node_up:datavg[node_up_vg_fence_init:92] [[ -z datavg ]] :node_up:datavg[node_up_vg_fence_init:109] : Volume group datavg is an HACMP resource :node_up:datavg[node_up_vg_fence_init:111] [[ 'caavg_private rootvg' == ?(*\ )datavg?(\ *) ]] :node_up:datavg[node_up_vg_fence_init:115] fence_height=ro :node_up:datavg[node_up_vg_fence_init:119] : Recreate the fence group to match current volume group membership :node_up:datavg[node_up_vg_fence_init:121] cl_vg_fence_redo -c datavg ro :cl_vg_fence_redo[52] version=1.3 :cl_vg_fence_redo[55] RC=0 :cl_vg_fence_redo[55] typeset -li RC :cl_vg_fence_redo[58] : Check for optional -c parameter :cl_vg_fence_redo[60] [[ -c == -c ]] :cl_vg_fence_redo[62] c_flag=-c :cl_vg_fence_redo[63] shift :cl_vg_fence_redo[66] VG=datavg :cl_vg_fence_redo[67] UUID_file=/usr/es/sbin/cluster/etc/vg/datavg.uuid :cl_vg_fence_redo[68] fence_height=ro :cl_vg_fence_redo[70] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.uuid ]] :cl_vg_fence_redo[83] [[ -z ro ]] :cl_vg_fence_redo[98] : Rebuild the fence group for datavg :cl_vg_fence_redo[99] : First, find the disks in the volume group :cl_vg_fence_redo[101] /usr/sbin/getlvodm -v datavg :cl_vg_fence_redo[101] VGID=00c44af100004b00000001851e9dc053 :cl_vg_fence_redo[103] [[ -n 00c44af100004b00000001851e9dc053 ]] :cl_vg_fence_redo[106] : Create a fence group for datavg :cl_vg_fence_redo[108] /usr/sbin/getlvodm -w 00c44af100004b00000001851e9dc053 :cl_vg_fence_redo[108] cut -f2 '-d ' :cl_vg_fence_redo[108] PV_disk_list=$'hdisk2\nhdisk3\nhdisk4\nhdisk5\nhdisk6\nhdisk7\nhdisk8' :cl_vg_fence_redo[109] cl_vg_fence_init -c datavg ro hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8 cl_vg_fence_init[145]: version @(#) 7d4c34b 43haes/usr/sbin/cluster/events/utils/cl_vg_fence_init.c, 726, 2147A_aha726, Feb 05 2021 09:50 PM cl_vg_fence_init[204]: odm_initialize() cl_vg_fence_init[231]: calloc(7, 64) cl_vg_fence_init[259]: getattr(hdisk2, PCM) = PCM/friend/fcpother cl_vg_fence_init[259]: getattr(hdisk3, PCM) = PCM/friend/fcpother cl_vg_fence_init[259]: getattr(hdisk4, PCM) = PCM/friend/fcpother cl_vg_fence_init[259]: getattr(hdisk5, PCM) = PCM/friend/fcpother cl_vg_fence_init[259]: getattr(hdisk6, PCM) = PCM/friend/fcpother cl_vg_fence_init[259]: getattr(hdisk7, PCM) = PCM/friend/fcpother cl_vg_fence_init[259]: getattr(hdisk8, PCM) = PCM/friend/fcpother cl_vg_fence_init[294]: sfwAddFenceGroup(datavg, 7, hdisk2, hdisk3, hdisk4, hdisk5, hdisk6, hdisk7, hdisk8) cl_vg_fence_init[374]: free(200101b8) cl_vg_fence_init[400]: creat(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_vg_fence_init[408]: write(/usr/es/sbin/cluster/etc/vg/datavg.uuid, 16) cl_vg_fence_init[442]: sfwSetFenceGroup(vg=datavg, height=ro(2) uuid=ec2db4422261eae02091227fb9e53c88) :cl_vg_fence_redo[110] RC=0 :cl_vg_fence_redo[111] : Exit status is 0 from cl_vg_fence_init datavg ro hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8 :cl_vg_fence_redo[113] (( 0 != 0 )) :cl_vg_fence_redo[123] return 0 :node_up:datavg[node_up_vg_fence_init:122] valid_vg_lst=' datavg'
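cl_vg_fence_redo, traced above, rebuilds a fence group from the volume group's current disk membership. Condensed into the same ksh idiom (all commands and values are from this trace):
# Sketch: recreate the fence group for a VG at read-only fence height.
VG=datavg
VGID=$(/usr/sbin/getlvodm -v $VG)                           # VG name -> VGID
PV_disk_list=$(/usr/sbin/getlvodm -w $VGID | cut -f2 '-d ') # strip PVIDs, keep hdisk names
cl_vg_fence_init -c $VG ro $PV_disk_list                    # new group, fenced read-only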
:node_up:datavg[node_up_vg_fence_init:125] [[ -e /var/hacmp/log/node_up.lsvg.err ]] :node_up:datavg[node_up_vg_fence_init:125] [[ ! -s /var/hacmp/log/node_up.lsvg.err ]] :node_up:datavg[node_up_vg_fence_init:125] rm /var/hacmp/log/node_up.lsvg.err :node_up:datavg[node_up_vg_fence_init:128] : Any remaining old fence group files are from stale fence groups, :node_up:datavg[node_up_vg_fence_init:129] : so remove them :node_up:datavg[node_up_vg_fence_init:131] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.uuid ]] :node_up:datavg[node_up_vg_fence_init:133] ls /usr/es/sbin/cluster/etc/vg/datavg.uuid :node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:135] PS4_LOOP=/usr/es/sbin/cluster/etc/vg/datavg.uuid :node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:136] VG_name=datavg.uuid :node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:137] VG_name=datavg :node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:138] [[ ' datavg' == ?(*\ )datavg?(\ *) ]] :node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:141] : Just redid the fence group for datavg :node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:143] continue :node_up:/usr/es/sbin/cluster/etc/vg/datavg.uuid[node_up_vg_fence_init:158] unset PS4_LOOP :node_up[node_up_vg_fence_init:160] return 0 :node_up[344] : If WLM manager clases have been configured for an application server, process them now :node_up[346] clodmget -q $'name like \'WLM_*\'' -f id HACMPresource :node_up[346] [[ -n '' ]] :node_up[371] : Call ss-load replicated resource methods if they are defined :node_up[373] cl_rrmethods2call ss_load :cl_rrmethods2call[56] version=%I% :cl_rrmethods2call[84] RRMETHODS='' :cl_rrmethods2call[85] NEED_RR_ENV_VARS=no :cl_rrmethods2call[104] : The load and unload methods if defined are returned on the :cl_rrmethods2call[105] : local node :cl_rrmethods2call[107] [[ epprda == epprda ]] :cl_rrmethods2call[109] NEED_RR_ENV_VARS=yes :cl_rrmethods2call[129] : Set the '*_REP_RESOURCE' variables if needed. 
:cl_rrmethods2call[131] [[ yes == yes ]] :cl_rrmethods2call[133] cllsres :cl_rrmethods2call[133] 2> /dev/null :cl_rrmethods2call[133] eval APPLICATIONS='"epprd_app"' EXPORT_FILESYSTEM='"/board_org' '/sapmnt/EPP"' FILESYSTEM='""' FORCED_VARYON='"false"' FSCHECK_TOOL='"fsck"' FS_BEFORE_IPADDR='"false"' MOUNT_FILESYSTEM='"/board;/board_org"' RECOVERY_METHOD='"sequential"' SERVICE_LABEL='"epprd"' SSA_DISK_FENCING='"false"' VG_AUTO_IMPORT='"false"' VOLUME_GROUP='"datavg"' USERDEFINED_RESOURCES='""' :cl_rrmethods2call[1] APPLICATIONS=epprd_app :cl_rrmethods2call[1] EXPORT_FILESYSTEM='/board_org /sapmnt/EPP' :cl_rrmethods2call[1] FILESYSTEM='' :cl_rrmethods2call[1] FORCED_VARYON=false :cl_rrmethods2call[1] FSCHECK_TOOL=fsck :cl_rrmethods2call[1] FS_BEFORE_IPADDR=false :cl_rrmethods2call[1] MOUNT_FILESYSTEM='/board;/board_org' :cl_rrmethods2call[1] RECOVERY_METHOD=sequential :cl_rrmethods2call[1] SERVICE_LABEL=epprd :cl_rrmethods2call[1] SSA_DISK_FENCING=false :cl_rrmethods2call[1] VG_AUTO_IMPORT=false :cl_rrmethods2call[1] VOLUME_GROUP=datavg :cl_rrmethods2call[1] USERDEFINED_RESOURCES='' :cl_rrmethods2call[137] [[ -n '' ]] :cl_rrmethods2call[142] [[ -n '' ]] :cl_rrmethods2call[147] [[ -n '' ]] :cl_rrmethods2call[152] [[ -n '' ]] :cl_rrmethods2call[157] [[ -n '' ]] :cl_rrmethods2call[162] [[ -n '' ]] :cl_rrmethods2call[167] [[ -n '' ]] :cl_rrmethods2call[172] [[ -n '' ]] :cl_rrmethods2call[182] [[ -z '' ]] :cl_rrmethods2call[184] typeset sysmgdata :cl_rrmethods2call[185] typeset reposmgdata :cl_rrmethods2call[186] [[ -x /usr/es/sbin/cluster/xd_generic/xd_cli/clxd_list_mg_smit ]] :cl_rrmethods2call[191] [[ -n '' ]] :cl_rrmethods2call[191] [[ -n '' ]] :cl_rrmethods2call[197] echo '' :cl_rrmethods2call[199] return 0 :node_up[373] METHODS='' :node_up[387] : When the local node is brought up, reset the resource locator info. :node_up[390] clchdaemons -r -d clstrmgr_scripts -t resource_locator :node_up[397] [[ '' != manual ]] :node_up[400] : attempt passive varyon for any ECM VGs in serial RGs :node_up[405] cl_pvo :cl_pvo[590] version=1.34.2.12 :cl_pvo(0.007)[592] PS4_TIMER=true :cl_pvo(0.007)[594] rc=0 :cl_pvo(0.007)[594] typeset -li rc :cl_pvo(0.007)[595] mode=0 :cl_pvo(0.007)[595] typeset -li mode :cl_pvo(0.007)[600] ENODEV=19 :cl_pvo(0.007)[600] typeset -li ENODEV :cl_pvo(0.007)[601] vg_force_on_flag='' :cl_pvo(0.007)[605] : Pick up any passed options :cl_pvo(0.007)[607] rg_list='' :cl_pvo(0.007)[607] export rg_list :cl_pvo(0.008)[608] vg_list='' :cl_pvo(0.008)[609] fs_list='' :cl_pvo(0.008)[610] all_vgs_flag='' :cl_pvo(0.008)[611] [[ -z '' ]] :cl_pvo(0.008)[613] all_vgs_flag=true :cl_pvo(0.008)[615] getopts :g:v:f: option :cl_pvo(0.008)[629] shift 0 :cl_pvo(0.008)[630] [[ -n '' ]] :cl_pvo(0.008)[645] O_flag='' :cl_pvo(0.008)[646] odmget -q 'attribute = varyon_state' PdAt :cl_pvo(0.010)[646] [[ -n $'\nPdAt:\n\tuniquetype = "logical_volume/vgsubclass/vgtype"\n\tattribute = "varyon_state"\n\tdeflt = "0"\n\tvalues = "0,1,2,3"\n\twidth = ""\n\ttype = "R"\n\tgeneric = ""\n\trep = "l"\n\tnls_index = 0' ]] :cl_pvo(0.010)[649] : LVM may record that a volume group was varied on from an earlier :cl_pvo(0.010)[650] : IPL. Rely on HA state tracking, and override the LVM check :cl_pvo(0.010)[652] O_flag=-O :cl_pvo(0.010)[655] [[ -n true ]] :cl_pvo(0.010)[657] [[ -z epprda ]] :cl_pvo(0.010)[661] [[ -z epprda ]] :cl_pvo(0.010)[672] : Since no resource names of any type were explicitly passed, go :cl_pvo(0.010)[673] : find all the resource groups this node is a member of. 
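cl_pvo now has to work out which resource groups, volume groups and file systems it is responsible for; the pipelines it uses are traced in the lines that follow. A condensed sketch, with epprda as the local node per this log:
# Sketch: cl_pvo resource discovery, as traced below.
rg_list=$(clodmget -f group,nodes HACMPgroup | egrep '[: ]epprda( |$)' | cut -f1 -d:)
for rg in $rg_list ; do
    # serial-access volume groups and file systems of each group
    clodmget -q "group = $rg and name = VOLUME_GROUP" -f value -n HACMPresource
    clodmget -q "group = $rg and name = FILESYSTEM" -f value -n HACMPresource
done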
:cl_pvo(0.012)[675] clodmget -f group,nodes HACMPgroup :cl_pvo(0.015)[675] egrep '[: ]epprda( |$)' :cl_pvo(0.015)[675] cut -f1 -d: :cl_pvo(0.019)[675] rg_list=epprd_rg :cl_pvo(0.019)[676] [[ -z epprd_rg ]] :cl_pvo(0.019)[686] [[ -z '' ]] :cl_pvo(0.019)[686] [[ -n epprd_rg ]] :cl_pvo(0.019)[689] : Since no volume groups were passed, go find all the volume groups :cl_pvo(0.019)[690] : in the given/extracted list of resource groups. :cl_pvo(0.019)[695] : For each resource group that this node participates in, get the :cl_pvo(0.019)[696] : list of serial access volume groups in that resource group. :cl_pvo(0.019)[698] clodmget -q 'group = epprd_rg and name = VOLUME_GROUP' -f value -n HACMPresource :cl_pvo(0.022)[698] rg_vg_list=datavg :cl_pvo(0.022)[700] [[ -n datavg ]] :cl_pvo(0.022)[702] [[ -n true ]] :cl_pvo(0.022)[703] odmget -q $'group = epprd_rg and name like \'*REP_RESOURCE\'' HACMPresource :cl_pvo(0.024)[703] [[ -n '' ]] :cl_pvo(0.024)[739] : If there were any serial access volume groups for this node and :cl_pvo(0.024)[740] : that resource group, add them to the list. :cl_pvo(0.024)[742] vg_list=datavg :cl_pvo(0.024)[747] [[ -z '' ]] :cl_pvo(0.024)[747] [[ -n epprd_rg ]] :cl_pvo(0.024)[750] : Since no file systems were passed, go find all the file systems in :cl_pvo(0.024)[751] : the given/extracted list of resource groups. :cl_pvo(0.024)[755] : For each resource group that this node participates in, get the :cl_pvo(0.024)[756] : list of file systems in that resource group. :cl_pvo(0.024)[761] clodmget -q 'group = epprd_rg and name = FILESYSTEM' -f value -n HACMPresource :cl_pvo(0.027)[761] rg_fs_list=ALL :cl_pvo(0.027)[763] [[ -n ALL ]] :cl_pvo(0.027)[765] [[ -n true ]] :cl_pvo(0.027)[766] odmget -q $'group = epprd_rg and name like \'*REP_RESOURCE\'' HACMPresource :cl_pvo(0.030)[766] [[ -n '' ]] :cl_pvo(0.030)[780] : If there were any file systems for this node and that resource :cl_pvo(0.030)[781] : group, add them to the list :cl_pvo(0.030)[783] fs_list=ALL :cl_pvo(0.030)[790] [[ ALL == ALL ]] :cl_pvo(0.030)[792] continue :cl_pvo(0.030)[801] : Remove any duplicates from the volume group list :cl_pvo(0.031)[803] echo datavg :cl_pvo(0.033)[803] tr ' ' '\n' :cl_pvo(0.034)[803] sort -u :cl_pvo(0.039)[803] vg_list=datavg :cl_pvo(0.039)[805] [[ -z datavg ]] :cl_pvo(0.039)[814] : Find out what volume groups are currently on-line :cl_pvo(0.039)[816] lsvg -L -o :cl_pvo(0.040)[816] 2> /tmp/lsvg.err :cl_pvo(0.042)[816] print caavg_private rootvg :cl_pvo(0.042)[816] ON_LIST='caavg_private rootvg' :cl_pvo(0.042)[819] : If this node is the first node up in the cluster, :cl_pvo(0.042)[820] : we want to do a sync for each of the volume groups :cl_pvo(0.042)[821] : we bring on-line. If multiple cluster nodes are already active, the :cl_pvo(0.042)[822] : sync is unnecesary, having been done once, and possibly disruptive. :cl_pvo(0.042)[824] [[ -n '' ]] :cl_pvo(0.042)[833] : No other cluster nodes are present, default to sync just to be sure :cl_pvo(0.042)[834] : the volume group is in a good state :cl_pvo(0.042)[836] syncflag='' :cl_pvo(0.042)[840] : Now, process each volume group in the list of those this node acceses. :cl_pvo(0.042):datavg[844] PS4_LOOP=datavg :cl_pvo(0.042):datavg[844] typeset PS4_LOOP :cl_pvo(0.042):datavg[846] : Skip any concurrent GMVGs, they should never be pvo. 
:cl_pvo(0.042):datavg[848] odmget -q name='GMVG_REP_RESOURCE AND value=datavg' HACMPresource :cl_pvo(0.045):datavg[848] [[ -n '' ]] :cl_pvo(0.045):datavg[853] : The VGID is what the LVM low level commands used below use to :cl_pvo(0.045):datavg[854] : identify the volume group. :cl_pvo(0.045):datavg[856] /usr/sbin/getlvodm -v datavg :cl_pvo(0.048):datavg[856] vgid=00c44af100004b00000001851e9dc053 :cl_pvo(0.048):datavg[860] mode=99 :cl_pvo(0.048):datavg[863] : Attempt to determine the mode of the volume group - is it an :cl_pvo(0.048):datavg[864] : enhanced concurrent mode volume group or not. :cl_pvo(0.048):datavg[868] export mode :cl_pvo(0.048):datavg[869] hdisklist='' :cl_pvo(0.048):datavg[870] /usr/sbin/getlvodm -w 00c44af100004b00000001851e9dc053 :cl_pvo(0.051):datavg[870] read pvid hdisk :cl_pvo(0.051):datavg[871] hdisklist=hdisk2 :cl_pvo(0.051):datavg[870] read pvid hdisk :cl_pvo(0.051):datavg[871] hdisklist='hdisk2 hdisk3' :cl_pvo(0.051):datavg[870] read pvid hdisk :cl_pvo(0.051):datavg[871] hdisklist='hdisk2 hdisk3 hdisk4' :cl_pvo(0.051):datavg[870] read pvid hdisk :cl_pvo(0.051):datavg[871] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5' :cl_pvo(0.051):datavg[870] read pvid hdisk :cl_pvo(0.051):datavg[871] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6' :cl_pvo(0.051):datavg[870] read pvid hdisk :cl_pvo(0.051):datavg[871] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7' :cl_pvo(0.051):datavg[870] read pvid hdisk :cl_pvo(0.051):datavg[871] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8' :cl_pvo(0.051):datavg[870] read pvid hdisk :cl_pvo(0.051):datavg[873] get_vg_mode 'hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8' 00c44af100004b00000001851e9dc053 datavg :cl_pvo(0.051):datavg[get_vg_mode:289] typeset vgid vg_name syncflag hdisklist :cl_pvo(0.051):datavg[get_vg_mode:290] typeset GROUP_NAME FORCED_VARYON :cl_pvo(0.051):datavg[get_vg_mode:291] TUR_RC=0 :cl_pvo(0.051):datavg[get_vg_mode:291] typeset -li TUR_RC :cl_pvo(0.051):datavg[get_vg_mode:292] vg_disks=0 :cl_pvo(0.051):datavg[get_vg_mode:292] typeset -li vg_disks :cl_pvo(0.051):datavg[get_vg_mode:293] max_disk_test=0 :cl_pvo(0.051):datavg[get_vg_mode:293] typeset -li max_disk_test :cl_pvo(0.051):datavg[get_vg_mode:294] disk_tested=0 :cl_pvo(0.051):datavg[get_vg_mode:294] typeset -li disk_tested :cl_pvo(0.051):datavg[get_vg_mode:296] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8' :cl_pvo(0.051):datavg[get_vg_mode:297] vgid=00c44af100004b00000001851e9dc053 :cl_pvo(0.051):datavg[get_vg_mode:298] vg_name=datavg :cl_pvo(0.051):datavg[get_vg_mode:299] syncflag='' :cl_pvo(0.051):datavg[get_vg_mode:301] odmget -q name='datavg and attribute=conc_capable and value=y' CuAt :cl_pvo(0.052):datavg[get_vg_mode:301] ODMDIR=/etc/objrepos :cl_pvo(0.054):datavg[get_vg_mode:301] [[ -n $'\nCuAt:\n\tname = "datavg"\n\tattribute = "conc_capable"\n\tvalue = "y"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "l"\n\tnls_index = 0' ]] :cl_pvo(0.054):datavg[get_vg_mode:304] : If LVM thinks that this volume group is concurrent capable, that :cl_pvo(0.054):datavg[get_vg_mode:305] : is good enough :cl_pvo(0.054):datavg[get_vg_mode:307] mode=32 :cl_pvo(0.054):datavg[get_vg_mode:308] return :cl_pvo(0.054):datavg[876] : See if the volume group is already on line. This should :cl_pvo(0.054):datavg[877] : only happen if it were manually brought on line outside of HACMP :cl_pvo(0.054):datavg[878] : control, or left on-line after a forced down. 
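get_vg_mode, traced above, settles the volume group's mode by asking the ODM directly: if LVM has marked the group concurrent capable, that alone is accepted, and mode 32 (enhanced concurrent) makes it a passive-varyon candidate. A minimal sketch:
# Sketch: the conc_capable test from get_vg_mode above.
VG=datavg
if [[ -n $(ODMDIR=/etc/objrepos odmget -q "name = $VG and attribute = conc_capable and value = y" CuAt) ]] ; then
    mode=32    # enhanced concurrent mode; eligible for passive varyon
fi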
:cl_pvo(0.054):datavg[880] vg_on_mode='' :cl_pvo(0.054):datavg[880] typeset vg_on_mode :cl_pvo(0.054):datavg[881] [[ 'caavg_private rootvg' == ?(*\ )datavg?(\ *) ]] :cl_pvo(0.055):datavg[891] lsvg -L datavg :cl_pvo(0.055):datavg[891] 2> /dev/null :cl_pvo(0.055):datavg[891] grep -q -i -w passive-only :cl_pvo(0.060):datavg[896] [[ -n '' ]] :cl_pvo(0.060):datavg[976] : Volume group is currently not on line in any mode :cl_pvo(0.060):datavg[978] (( 99 == 32 )) :cl_pvo(0.060):datavg[1041] (( 32 != 32 && 99 != 32 )) :cl_pvo(0.060):datavg[1060] (( 32 == 32 )) :cl_pvo(0.060):datavg[1063] : If this is actually an enhanced concurrent mode volume group, :cl_pvo(0.060):datavg[1064] : bring it on line in passive mode. Other kinds are just skipped. :cl_pvo(0.060):datavg[1066] varyonp datavg 'hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8' :cl_pvo(0.060):datavg[varyonp:417] NOQUORUM=20 :cl_pvo(0.060):datavg[varyonp:417] typeset -li NOQUORUM :cl_pvo(0.060):datavg[varyonp:418] rc=0 :cl_pvo(0.060):datavg[varyonp:418] typeset -li rc :cl_pvo(0.060):datavg[varyonp:421] : Pick up passed parameters: volume group and sync flag :cl_pvo(0.060):datavg[varyonp:423] typeset syncflag hdisklist vg :cl_pvo(0.060):datavg[varyonp:424] vg=datavg :cl_pvo(0.060):datavg[varyonp:425] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8' :cl_pvo(0.060):datavg[varyonp:426] syncflag='' :cl_pvo(0.060):datavg[varyonp:429] : Make sure the volume group is not fenced. Varyon requires read write :cl_pvo(0.060):datavg[varyonp:430] : access. :cl_pvo(0.060):datavg[varyonp:432] cl_set_vg_fence_height -c datavg rw cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0)) :cl_pvo(0.063):datavg[varyonp:433] RC=0 :cl_pvo(0.063):datavg[varyonp:434] (( 19 == 0 )) :cl_pvo(0.063):datavg[varyonp:442] : Return code from volume group fencing for datavg is 0 :cl_pvo(0.063):datavg[varyonp:443] (( 0 != 0 )) :cl_pvo(0.063):datavg[varyonp:455] : Try to vary on the volume group in passive concurrent mode :cl_pvo(0.063):datavg[varyonp:457] varyonvg -c -P -O datavg :cl_pvo(0.547):datavg[varyonp:458] rc=0 :cl_pvo(0.547):datavg[varyonp:460] (( 0 != 0 )) :cl_pvo(0.547):datavg[varyonp:483] : exit status of varyonvg -c -P -O datavg is: 0 :cl_pvo(0.547):datavg[varyonp:485] (( 0 == 20 )) :cl_pvo(0.547):datavg[varyonp:505] : If varyon was ultimately unsuccessful, note the error :cl_pvo(0.547):datavg[varyonp:507] (( 0 != 0 )) :cl_pvo(0.547):datavg[varyonp:511] : If varyonvg was successful, try to recover :cl_pvo(0.547):datavg[varyonp:512] : any missing or removed disks :cl_pvo(0.547):datavg[varyonp:514] mr_recovery datavg :cl_pvo(0.547):datavg[mr_recovery:59] vg=datavg :cl_pvo(0.547):datavg[mr_recovery:59] typeset vg :cl_pvo(0.547):datavg[mr_recovery:60] typeset mr_disks :cl_pvo(0.548):datavg[mr_recovery:61] typeset disk_list :cl_pvo(0.548):datavg[mr_recovery:62] typeset hdisk :cl_pvo(0.549):datavg[mr_recovery:64] lsvg -p datavg :cl_pvo(0.549):datavg[mr_recovery:64] 2> /dev/null :cl_pvo(0.552):datavg[mr_recovery:64] grep -iw missing :cl_pvo(0.571):datavg[mr_recovery:64] missing_disks='' :cl_pvo(0.571):datavg[mr_recovery:66] [[ -n '' ]] 
:cl_pvo(0.572):datavg[mr_recovery:89] lsvg -p datavg :cl_pvo(0.572):datavg[mr_recovery:89] 2> /dev/null :cl_pvo(0.575):datavg[mr_recovery:89] grep -iw removed :cl_pvo(0.593):datavg[mr_recovery:89] removed_disks='' :cl_pvo(0.593):datavg[mr_recovery:91] [[ -n '' ]] :cl_pvo(0.593):datavg[varyonp:518] : Restore the fence height to read only, for passive varyon :cl_pvo(0.593):datavg[varyonp:520] cl_set_vg_fence_height -c datavg ro cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=ro(2)) :cl_pvo(0.596):datavg[varyonp:521] RC=0 :cl_pvo(0.596):datavg[varyonp:522] : Return code from volume group fencing for datavg is 0 :cl_pvo(0.596):datavg[varyonp:523] (( 0 != 0 )) :cl_pvo(0.597):datavg[varyonp:533] return 0 :cl_pvo(0.597):datavg[1073] return 0 :node_up[406] : exit status of cl_pvo is: 0 :node_up[422] ls '/dev/vpath*' :node_up[422] 1> /dev/null 2>& 1 :node_up[432] : Configure any split and merge policies. :node_up[434] rm -f /usr/es/sbin/cluster/etc/smm_oflag :node_up[435] [[ -z '' ]] :node_up[438] : If this is the first node up, configure split merge handling. :node_up[440] cl_cfg_sm_rt :cl_cfg_sm_rt[738] version=1.34 :cl_cfg_sm_rt[741] clctrl_rc=0 :cl_cfg_sm_rt[741] typeset -li clctrl_rc :cl_cfg_sm_rt[742] src_rc=0 :cl_cfg_sm_rt[742] typeset -li src_rc :cl_cfg_sm_rt[743] cl_migcheck_rc=0 :cl_cfg_sm_rt[743] typeset -li cl_migcheck_rc :cl_cfg_sm_rt[744] bad_policy='' :cl_cfg_sm_rt[745] SMP='' :cl_cfg_sm_rt[748] : If we are in migration - if all nodes are not up to this level - do not :cl_cfg_sm_rt[749] : attempt any configuration. 
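The varyonp sequence traced above (ending just before cl_cfg_sm_rt starts) always brackets the varyon with fence height changes: lift the fence to read/write, vary on in passive concurrent mode, scan for missing or removed disks, then drop the fence back to read-only. Condensed, with the flags as logged:
# Sketch: passive varyon with fencing, per varyonp above.
VG=datavg
cl_set_vg_fence_height -c $VG rw   # varyon requires read/write access
varyonvg -c -P -O $VG              # passive concurrent varyon; -O overrides stale LVM varyon state
if (( $? == 0 )) ; then
    lsvg -p $VG 2>/dev/null | grep -iw missing   # mr_recovery: any missing disks?
    lsvg -p $VG 2>/dev/null | grep -iw removed   # ... any removed disks?
fi
cl_set_vg_fence_height -c $VG ro   # passive mode needs only read access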
:cl_cfg_sm_rt[751] clmixver :cl_cfg_sm_rt[751] version=22 :cl_cfg_sm_rt[752] (( 22 < 14 )) :cl_cfg_sm_rt[761] : Retrieve configured policies :cl_cfg_sm_rt[763] clodmget -q 'policy = action' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[763] Action=Reboot :cl_cfg_sm_rt[764] clodmget -q 'policy = split' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[764] Split=None :cl_cfg_sm_rt[765] clodmget -q 'policy = merge' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[765] Merge=Majority :cl_cfg_sm_rt[766] clodmget -q 'policy = tiebreaker' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[766] TieBreaker='' :cl_cfg_sm_rt[767] clodmget -q 'policy = nfs_quorumserver' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[767] nfs_quorumserver='' :cl_cfg_sm_rt[768] clodmget -q 'policy = local_quorumdirectory' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[768] local_quorumdirectory='' :cl_cfg_sm_rt[769] clodmget -q 'policy = remote_quorumdirectory' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[769] remote_quorumdirectory='' :cl_cfg_sm_rt[770] clodmget -q 'policy = anhp' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[770] is_anhp='' :cl_cfg_sm_rt[771] clodmget -q 'policy = scsi' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[771] is_scsi='' :cl_cfg_sm_rt[772] clodmget -q name=clutils.log -f value -n HACMPlogs :cl_cfg_sm_rt[772] CLUTILS_LOG=/var/hacmp/log/clutils.log :cl_cfg_sm_rt[775] : If policies are unset, apply the default policies :cl_cfg_sm_rt[777] Split=None :cl_cfg_sm_rt[778] Merge=Majority :cl_cfg_sm_rt[779] Action=Reboot :cl_cfg_sm_rt[782] : If tiebreaker was a configured policy, be sure that one was defined :cl_cfg_sm_rt[784] [[ -z '' ]] :cl_cfg_sm_rt[786] [[ None == TieBreaker ]] :cl_cfg_sm_rt[790] [[ Majority == TieBreaker ]] :cl_cfg_sm_rt[795] [[ -n '' ]] :cl_cfg_sm_rt[807] : Set up the interlock file for use by smcaactrl. This tells :cl_cfg_sm_rt[808] : smcaactrl to allow the following CAA operations. :cl_cfg_sm_rt[810] date :cl_cfg_sm_rt[810] 1> /usr/es/sbin/cluster/etc/cl_cfg_sm_rt.19530030 :cl_cfg_sm_rt[811] trap 'on_exit $?' EXIT :cl_cfg_sm_rt[814] : Setting up CAA tunable local_merge_policy :cl_cfg_sm_rt[816] typeset -i caa_level :cl_cfg_sm_rt[817] lslpp -l bos.cluster.rte :cl_cfg_sm_rt[817] grep bos.cluster.rte :cl_cfg_sm_rt[817] uniq :cl_cfg_sm_rt[817] awk -F ' ' '{print $2}' :cl_cfg_sm_rt[817] tr -d . 
:cl_cfg_sm_rt[817] caa_level=725102 :cl_cfg_sm_rt[818] (( 725102 >=7140 )) :cl_cfg_sm_rt[819] configure_local_merge_policy :cl_cfg_sm_rt[configure_local_merge_policy:665] typeset -i clctrl_rc :cl_cfg_sm_rt[configure_local_merge_policy:666] [[ -z '' ]] :cl_cfg_sm_rt[configure_local_merge_policy:666] [[ -z '' ]] :cl_cfg_sm_rt[configure_local_merge_policy:667] capability=0 :cl_cfg_sm_rt[configure_local_merge_policy:667] typeset -i capability :cl_cfg_sm_rt[configure_local_merge_policy:669] cl_get_capabilities -i 6 :cl_cfg_sm_rt[configure_local_merge_policy:669] 2>& 1 :cl_cfg_sm_rt[configure_local_merge_policy:669] caa_sm_capability=$':cl_cfg_sm_rt[configure_local_merge_policy:669] LC_ALL=C\ncl_get_capabilities[178]: version 1.9\ncapability is 6\n\tid: 6 version: 1 flag: 1 ' :cl_cfg_sm_rt[configure_local_merge_policy:670] [[ -n $':cl_cfg_sm_rt[configure_local_merge_policy:669] LC_ALL=C\ncl_get_capabilities[178]: version 1.9\ncapability is 6\n\tid: 6 version: 1 flag: 1 ' ]] :cl_cfg_sm_rt[configure_local_merge_policy:674] : If Sub Cluster Split Merge capability is defined :cl_cfg_sm_rt[configure_local_merge_policy:675] : and globally available, then capability is set to 1 :cl_cfg_sm_rt[configure_local_merge_policy:677] capability='1 ' :cl_cfg_sm_rt[configure_local_merge_policy:680] (( 1 == 1 )) :cl_cfg_sm_rt[configure_local_merge_policy:682] : Sub Cluster Split-Merge capability is available cluster wide :cl_cfg_sm_rt[configure_local_merge_policy:684] [[ Majority != None ]] :cl_cfg_sm_rt[configure_local_merge_policy:686] clctrl -tune -o local_merge_policy=h 1 tunable updated on cluster epprda_cluster. :cl_cfg_sm_rt[configure_local_merge_policy:687] clctrl_rc=0 :cl_cfg_sm_rt[configure_local_merge_policy:688] (( 0 != 0 )) :cl_cfg_sm_rt[configure_local_merge_policy:725] return 0 :cl_cfg_sm_rt[820] rc=0 :cl_cfg_sm_rt[820] typeset -i rc :cl_cfg_sm_rt[821] (( 0 < 0 )) :cl_cfg_sm_rt[827] : Configure CAA in accordance with the specified or defaulted policies :cl_cfg_sm_rt[828] : for Merge :cl_cfg_sm_rt[830] clctrl -tune -a epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).communication_mode = u epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).config_timeout = 240 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).deadman_mode = a epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).dr_enabled = 1 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).link_timeout = 30000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).local_merge_policy = h epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).network_fdt = 20000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).no_if_traffic_monitor = 0 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).node_down_delay = 10000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).node_timeout = 30000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).packet_ttl = 32 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).remote_hb_factor = 1 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).repos_mode = e epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).site_merge_policy = h :cl_cfg_sm_rt[831] clctrl_rc=0 :cl_cfg_sm_rt[832] : Return code from 'clctrl -tune -a' is 0 :cl_cfg_sm_rt[835] : If the current deadman mode is not set to ASSERT, :cl_cfg_sm_rt[836] : change it to that :cl_cfg_sm_rt[842] clctrl -tune -x deadman_mode :cl_cfg_sm_rt[842] cut -f2 -d: :cl_cfg_sm_rt[842] current_deadman_mode=a :cl_cfg_sm_rt[843] [[ a != a ]] :cl_cfg_sm_rt[849] : Determine the current site merge policy, to see if it needs :cl_cfg_sm_rt[850] : to be changed 
:cl_cfg_sm_rt[852] cut -f2 -d: :cl_cfg_sm_rt[852] clctrl -tune -x site_merge_policy :cl_cfg_sm_rt[852] current_merge_policy=h :cl_cfg_sm_rt[854] [[ Majority == Manual ]] :cl_cfg_sm_rt[865] [[ Majority == None ]] :cl_cfg_sm_rt[878] : Everything else - tie breaker, majority, nfs - is heuristic merge policy :cl_cfg_sm_rt[880] [[ h != h ]] :cl_cfg_sm_rt[886] clctrl_rc=0 :cl_cfg_sm_rt[887] (( 0 != 0 )) :cl_cfg_sm_rt[901] [[ -n '' ]] :cl_cfg_sm_rt[919] RSCT_START_RETRIES=0 :cl_cfg_sm_rt[919] typeset -li RSCT_START_RETRIES :cl_cfg_sm_rt[920] MIN_RSCT_RETRIES=1 :cl_cfg_sm_rt[920] typeset -li MIN_RSCT_RETRIES :cl_cfg_sm_rt[921] MAX_RSCT_RETRIES=15 :cl_cfg_sm_rt[921] typeset -li MAX_RSCT_RETRIES :cl_cfg_sm_rt[922] grep ^RSCT_START_RETRIES /etc/environment :cl_cfg_sm_rt[922] eval :cl_cfg_sm_rt[923] (( 0 < 1 )) :cl_cfg_sm_rt[923] RSCT_START_RETRIES=1 :cl_cfg_sm_rt[924] (( 1 > 15 )) :cl_cfg_sm_rt[926] RSCT_TB_WAITTIME=0 :cl_cfg_sm_rt[926] typeset -li RSCT_TB_WAITTIME :cl_cfg_sm_rt[927] grep ^RSCT_TB_WAITTIME /etc/environment :cl_cfg_sm_rt[927] eval :cl_cfg_sm_rt[928] (( 0 <= 0 )) :cl_cfg_sm_rt[928] RSCT_TB_WAITTIME=30 :cl_cfg_sm_rt[930] RSCT_START_WAIT=0 :cl_cfg_sm_rt[930] typeset -li RSCT_START_WAIT :cl_cfg_sm_rt[931] MIN_RSCT_WAIT=10 :cl_cfg_sm_rt[931] typeset -li MIN_RSCT_WAIT :cl_cfg_sm_rt[932] MAX_RSCT_WAIT=60 :cl_cfg_sm_rt[932] typeset -li MAX_RSCT_WAIT :cl_cfg_sm_rt[933] grep ^RSCT_START_WAIT /etc/environment :cl_cfg_sm_rt[933] eval :cl_cfg_sm_rt[934] (( 0 < 10 )) :cl_cfg_sm_rt[934] RSCT_START_WAIT=10 :cl_cfg_sm_rt[935] (( 10 > 60 )) :cl_cfg_sm_rt[937] (( retries=0)) :cl_cfg_sm_rt[937] (( 0 < 1)) :cl_cfg_sm_rt[939] lsrsrc IBM.PeerNode :cl_cfg_sm_rt[939] 1>> /var/hacmp/log/clutils.log 2>& 1 :cl_cfg_sm_rt[941] break :cl_cfg_sm_rt[947] (( 0 >= 1 )) :cl_cfg_sm_rt[954] : Configure RSCT in accordance with the specified or defaulted policies :cl_cfg_sm_rt[955] : for Split :cl_cfg_sm_rt[965] CT_MANAGEMENT_SCOPE=2 :cl_cfg_sm_rt[965] export CT_MANAGEMENT_SCOPE :cl_cfg_sm_rt[966] lsrsrc -t -c -x IBM.PeerNode OpQuorumTieBreaker :cl_cfg_sm_rt[966] Current_TB='"Success" ' :cl_cfg_sm_rt[967] Current_TB='"Success' :cl_cfg_sm_rt[968] Current_TB=Success :cl_cfg_sm_rt[969] [[ None == None ]] :cl_cfg_sm_rt[971] [[ Success == Success ]] :cl_cfg_sm_rt[973] chrsrc -c IBM.PeerNode OpQuorumTieBreaker=Operator :cl_cfg_sm_rt[974] src_rc=0 :cl_cfg_sm_rt[975] (( 0 != 0 )) :cl_cfg_sm_rt[981] (( 0 == 0 )) :cl_cfg_sm_rt[983] chrsrc -s Name='="Success"' IBM.TieBreaker PostReserveWaitTime=30 :cl_cfg_sm_rt[984] src_rc=0 :cl_cfg_sm_rt[985] (( 0 != 0 )) :cl_cfg_sm_rt[990] chrsrc -c IBM.PeerNode OpQuorumTieBreaker=Success :cl_cfg_sm_rt[991] src_rc=0 :cl_cfg_sm_rt[992] (( 0 != 0 )) :cl_cfg_sm_rt[1044] src_rc=0 :cl_cfg_sm_rt[1045] (( 0 != 0 )) :cl_cfg_sm_rt[1053] : Configure RSCT Action :cl_cfg_sm_rt[1055] chrsrc -c IBM.PeerNode QuorumType=4 :cl_cfg_sm_rt[1056] src_rc=0 :cl_cfg_sm_rt[1057] (( 0 != 0 )) :cl_cfg_sm_rt[1064] chrsrc -c IBM.PeerNode CriticalMode=2 :cl_cfg_sm_rt[1065] src_rc=0 :cl_cfg_sm_rt[1066] (( 0 != 0 )) :cl_cfg_sm_rt[1073] [[ Reboot == Reboot ]] :cl_cfg_sm_rt[1075] chrsrc -c IBM.PeerNode CritRsrcProtMethod=1 :cl_cfg_sm_rt[1077] src_rc=0 :cl_cfg_sm_rt[1078] (( 0 != 0 )) :cl_cfg_sm_rt[1086] : Configure RSCT Critical Resource Daemon Grace Period for cluster level. 
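cl_cfg_sm_rt maps the PowerHA policies (Split=None, Merge=Majority, Action=Reboot in this log) onto RSCT class attributes through the chrsrc calls above. Condensed, with the values as traced:
# Sketch: RSCT side of the split/merge policy, per the trace above.
export CT_MANAGEMENT_SCOPE=2                        # peer domain scope
chrsrc -c IBM.PeerNode OpQuorumTieBreaker=Operator  # detach the active tie breaker
chrsrc -s 'Name=="Success"' IBM.TieBreaker PostReserveWaitTime=30
chrsrc -c IBM.PeerNode OpQuorumTieBreaker=Success   # Split=None -> 'Success' tie breaker
chrsrc -c IBM.PeerNode QuorumType=4
chrsrc -c IBM.PeerNode CriticalMode=2
chrsrc -c IBM.PeerNode CritRsrcProtMethod=1         # Action=Reboot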
:cl_cfg_sm_rt[1088] typeset grace_period :cl_cfg_sm_rt[1089] clodmget -f crit_daemon_restart_grace_period HACMPcluster :cl_cfg_sm_rt[1089] grace_period=60 :cl_cfg_sm_rt[1090] lsrsrc -c IBM.PeerNode :cl_cfg_sm_rt[1090] LC_ALL=C :cl_cfg_sm_rt[1090] awk -F= '{print $2}' :cl_cfg_sm_rt[1090] grep CritDaemonRestartGracePeriod :cl_cfg_sm_rt[1090] rsct_grace_period=' 60' :cl_cfg_sm_rt[1091] [[ -n ' 60' ]] :cl_cfg_sm_rt[1092] (( 60 != 60 )) :cl_cfg_sm_rt[1104] : Configure RSCT Critical Resource Daemon Grace Period for node level. :cl_cfg_sm_rt[1106] typeset node_grace_period :cl_cfg_sm_rt[1107] typeset node_list :cl_cfg_sm_rt[1108] typeset rsct_node_grace_period :cl_cfg_sm_rt[1110] : Get the CAA active nodes list :cl_cfg_sm_rt[1112] lscluster -m :cl_cfg_sm_rt[1112] grep -p 'State of node: UP' :cl_cfg_sm_rt[1112] grep -w 'Node name:' :cl_cfg_sm_rt[1112] cut -f2 -d: :cl_cfg_sm_rt[1112] node_list=$' epprda\n epprds' :cl_cfg_sm_rt[1115] clodmget -n -q object='COMMUNICATION_PATH and value=epprda' -f name HACMPnode :cl_cfg_sm_rt[1115] host_name=epprda :cl_cfg_sm_rt[1116] clodmget -n -q object='CRIT_DAEMON_RESTART_GRACE_PERIOD and name=epprda' -f value HACMPnode :cl_cfg_sm_rt[1116] node_grace_period='' :cl_cfg_sm_rt[1117] [[ -n '' ]] :cl_cfg_sm_rt[1115] clodmget -n -q object='COMMUNICATION_PATH and value=epprds' -f name HACMPnode :cl_cfg_sm_rt[1115] host_name=epprds :cl_cfg_sm_rt[1116] clodmget -n -q object='CRIT_DAEMON_RESTART_GRACE_PERIOD and name=epprds' -f value HACMPnode :cl_cfg_sm_rt[1116] node_grace_period='' :cl_cfg_sm_rt[1117] [[ -n '' ]] :cl_cfg_sm_rt[1134] : Success exit. Display the CAA and RSCT configuration :cl_cfg_sm_rt[1136] clctrl -tune -a epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).communication_mode = u epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).config_timeout = 240 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).deadman_mode = a epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).dr_enabled = 1 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).link_timeout = 30000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).local_merge_policy = h epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).network_fdt = 20000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).no_if_traffic_monitor = 0 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).node_down_delay = 10000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).node_timeout = 30000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).packet_ttl = 32 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).remote_hb_factor = 1 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).repos_mode = e epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).site_merge_policy = h :cl_cfg_sm_rt[1137] lscluster -m Calling node query for all nodes... 
Node query number of nodes examined: 2 Node name: epprda Cluster shorthand id for node: 1 UUID for node: f42873b8-9ee2-11ed-8018-fae6134ea920 State of node: UP NODE_LOCAL Reason: NONE Smoothed rtt to node: 0 Mean Deviation in network rtt to node: 0 Number of clusters node is a member in: 1 CLUSTER NAME SHID UUID epprda_cluster 0 f43c91c2-9ee2-11ed-8018-fae6134ea920 SITE NAME SHID UUID LOCAL 1 51735173-5173-5173-5173-517351735173 Points of contact for node: 0 ---------------------------------------------------------------------------- Node name: epprds Cluster shorthand id for node: 2 UUID for node: f42873fe-9ee2-11ed-8018-fae6134ea920 State of node: UP Reason: NONE Smoothed rtt to node: 7 Mean Deviation in network rtt to node: 3 Number of clusters node is a member in: 1 CLUSTER NAME SHID UUID epprda_cluster 0 f43c91c2-9ee2-11ed-8018-fae6134ea920 SITE NAME SHID UUID LOCAL 1 51735173-5173-5173-5173-517351735173 Points of contact for node: 1 ----------------------------------------------------------------------- Interface State Protocol Status SRC_IP->DST_IP ----------------------------------------------------------------------- tcpsock->02 UP IPv4 none 61.81.244.134->61.81.244.123 :cl_cfg_sm_rt[1138] lsrsrc -x -A b IBM.PeerNode resource 1: Name = "epprda" NodeList = {1} RSCTVersion = "3.2.6.4" ClassVersions = {} CritRsrcProtMethod = 0 IsQuorumNode = 1 IsPreferredGSGL = 1 NodeUUID = "f42873b8-9ee2-11ed-8018-fae6134ea920" HostName = "epprda" TBPriority = 0 CritDaemonRestartGracePeriod = -1 ActivePeerDomain = "epprda_cluster" NodeNameList = {"epprda"} OpState = 1 ConfigChanged = 1 CritRsrcActive = 0 OpUsabilityState = 1 MaintenanceState = 0 resource 2: Name = "epprds" NodeList = {2} RSCTVersion = "3.2.6.4" ClassVersions = {} CritRsrcProtMethod = 0 IsQuorumNode = 1 IsPreferredGSGL = 1 NodeUUID = "f42873fe-9ee2-11ed-8018-fae6134ea920" HostName = "epprds" TBPriority = 0 CritDaemonRestartGracePeriod = -1 ActivePeerDomain = "epprda_cluster" NodeNameList = {"epprds"} OpState = 1 ConfigChanged = 1 CritRsrcActive = 0 OpUsabilityState = 1 MaintenanceState = 0 :cl_cfg_sm_rt[1139] lsrsrc -x -c -A b IBM.PeerNode resource 1: CommittedRSCTVersion = "3.2.2.0" ActiveVersionChanging = 0 OpQuorumOverride = 0 CritRsrcProtMethod = 1 OpQuorumTieBreaker = "Success" QuorumType = 4 QuorumGroupName = "" Fanout = 32 OpFenceGroup = "" NodeCleanupCommand = "" NodeCleanupCriteria = "" QuorumLessStartupTimeout = 120 CriticalMode = 2 NotifyQuorumChangedCommand = "" NamePolicy = 1 LiveUpdateOptions = "" QuorumNotificationRespWaitTime = 0 MaintenanceModeConfig = "" CritDaemonRestartGracePeriod = 60 :cl_cfg_sm_rt[1141] return 0 :cl_cfg_sm_rt[1] on_exit 0 :node_up[441] : exit status of cl_cfg_sm_rt is 0 :node_up[498] : Enable NFS crossmounts during manual start :node_up[500] [[ -n false ]] :node_up[500] [[ false == true ]] :node_up[607] : When RG dependencies are not configured we call node_up_local/remote, :node_up[608] : followed by process_resources to process any remaining groups :node_up[610] [[ TRUE == FALSE ]] :node_up[657] [[ epprda == epprda ]] :node_up[660] : Perform any deferred TCP daemon startup, if necessary, :node_up[661] : along with any necessary start up of iSCSI devices. 
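The dumps above double as a by-hand verification recipe; together with the HACMPsplitmerge reads at the top of cl_cfg_sm_rt, these are the commands to check the result (the values in comments are the ones logged here):
# Sketch: inspect policy inputs and the resulting CAA/RSCT state.
clodmget -q 'policy = split' -f value -n HACMPsplitmerge    # None
clodmget -q 'policy = merge' -f value -n HACMPsplitmerge    # Majority
clodmget -q 'policy = action' -f value -n HACMPsplitmerge   # Reboot
clctrl -tune -a                  # CAA tunables, e.g. local_merge_policy = h
lscluster -m                     # CAA node states
lsrsrc -x -A b IBM.PeerNode      # RSCT per-node attributes
lsrsrc -x -c -A b IBM.PeerNode   # RSCT class attributes (tie breaker, quorum, ...)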
:node_up[663] cl_telinit :cl_telinit[178] version=%I% :cl_telinit[182] TELINIT_FILE=/usr/es/sbin/cluster/.telinit :cl_telinit[183] USE_TELINIT_FILE=/usr/es/sbin/cluster/.use_telinit :cl_telinit[185] [[ -f /usr/es/sbin/cluster/.use_telinit ]] :cl_telinit[189] USE_TELINIT=0 :cl_telinit[198] [[ '' == -boot ]] :cl_telinit[236] cl_lsitab clinit :cl_telinit[236] 1> /dev/null 2>& 1 :cl_telinit[239] : telinit a disabled :cl_telinit[241] return 0 :node_up[664] : exit status of cl_telinit is: 0 :node_up[667] return 0 Jan 28 2023 18:06:49 EVENT COMPLETED: node_up epprda 0 |2023-01-28T18:06:49|12510|EVENT COMPLETED: node_up epprda 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:06:49.778878 + echo '|2023-01-28T18:06:49.778878|INFO: node_up|epprda|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 18:06:51 EVENT START: rg_move_fence epprda 1 |2023-01-28T18:06:51|12511|EVENT START: rg_move_fence epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:06:51.984471 + echo '|2023-01-28T18:06:51.984471|INFO: rg_move_fence|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_fence[62] [[ high == high ]] :rg_move_fence[62] version=1.11 :rg_move_fence[63] NODENAME=epprda :rg_move_fence[63] export NODENAME :rg_move_fence[65] set -u :rg_move_fence[67] [ 2 != 2 ] :rg_move_fence[73] set +u :rg_move_fence[75] [[ -z TRUE ]] :rg_move_fence[80] [[ TRUE == TRUE ]] :rg_move_fence[82] LOCAL_NODENAME=epprda :rg_move_fence[83] odmget -qid=1 HACMPgroup :rg_move_fence[83] egrep 'group =' :rg_move_fence[83] awk '{print $3}' :rg_move_fence[83] eval RGNAME='"epprd_rg"' :rg_move_fence[1] RGNAME=epprd_rg +epprd_rg:rg_move_fence[84] GROUPNAME=epprd_rg +epprd_rg:rg_move_fence[85] group_state='$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[86] set +u +epprd_rg:rg_move_fence[87] eval print '$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[1] print +epprd_rg:rg_move_fence[87] RG_MOVE_ONLINE='' +epprd_rg:rg_move_fence[87] export RG_MOVE_ONLINE +epprd_rg:rg_move_fence[88] set -u +epprd_rg:rg_move_fence[89] RG_MOVE_ONLINE=TMP_ERROR +epprd_rg:rg_move_fence[91] set -a +epprd_rg:rg_move_fence[92] clsetenvgrp epprda rg_move epprd_rg '' :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_fence[92] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[93] RC=0 +epprd_rg:rg_move_fence[94] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_fence[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_fence[3] HOMELESS_GROUPS='' 
+epprd_rg:rg_move_fence[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_fence[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_fence[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_fence[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_fence[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_fence[8] SIBLING_GROUPS='' +epprd_rg:rg_move_fence[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_fence[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_fence[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[95] set +a +epprd_rg:rg_move_fence[96] [ 0 -ne 0 ] +epprd_rg:rg_move_fence[103] process_resources FENCE :rg_move_fence[3318] version=1.169 :rg_move_fence[3321] STATUS=0 :rg_move_fence[3322] sddsrv_off=FALSE :rg_move_fence[3324] true :rg_move_fence[3326] : call rgpa, and it will tell us what to do next :rg_move_fence[3328] set -a :rg_move_fence[3329] clRGPA FENCE :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa FENCE 2023-01-28T18:06:52.089058 clrgpa :clRGPA[+55] exit 0 :rg_move_fence[3329] eval JOB_TYPE=NONE :rg_move_fence[1] JOB_TYPE=NONE :rg_move_fence[3330] RC=0 :rg_move_fence[3331] set +a :rg_move_fence[3333] (( 0 != 0 )) :rg_move_fence[3342] RESOURCE_GROUPS='' :rg_move_fence[3343] GROUPNAME='' :rg_move_fence[3343] export GROUPNAME :rg_move_fence[3353] IS_SERVICE_START=1 :rg_move_fence[3354] IS_SERVICE_STOP=1 :rg_move_fence[3360] [[ NONE == RELEASE ]] :rg_move_fence[3360] [[ NONE == ONLINE ]] :rg_move_fence[3729] break :rg_move_fence[3740] : If sddsrv was turned off above, turn it back on again :rg_move_fence[3742] [[ FALSE == TRUE ]] :rg_move_fence[3747] exit 0 +epprd_rg:rg_move_fence[104] : exit status of process_resources FENCE is: 0 +epprd_rg:rg_move_fence[107] [[ TRUE == TRUE ]] +epprd_rg:rg_move_fence[109] export EVENT_TYPE +epprd_rg:rg_move_fence[110] echo ACQUIRE_PRIMARY ACQUIRE_PRIMARY +epprd_rg:rg_move_fence[111] [[ -n '' ]] +epprd_rg:rg_move_fence[141] exit 0 Jan 28 2023 18:06:52 EVENT COMPLETED: rg_move_fence epprda 1 0 |2023-01-28T18:06:52|12511|EVENT COMPLETED: rg_move_fence epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:06:52.181815 + echo '|2023-01-28T18:06:52.181815|INFO: rg_move_fence|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 18:06:52 EVENT START: rg_move_acquire epprda 1 |2023-01-28T18:06:52|12511|EVENT START: rg_move_acquire epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:06:52.381527 + echo '|2023-01-28T18:06:52.381527|INFO: rg_move_acquire|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_acquire[+54] [[ high == high ]] :rg_move_acquire[+54] version=1.9.1.7 :rg_move_acquire[+57] set -u :rg_move_acquire[+59] [ 2 != 2 ] :rg_move_acquire[+65] set +u :rg_move_acquire[+67] :rg_move_acquire[+67] clodmget -n -q id=1 -f group HACMPgroup RG=epprd_rg :rg_move_acquire[+68] export RG :rg_move_acquire[+70] [[ ACQUIRE_PRIMARY == ACQUIRE_PRIMARY ]] :rg_move_acquire[+75] typeset -i anhp_ret=0 :rg_move_acquire[+76] typeset -i scsi_ret=0 :rg_move_acquire[+78] clodmget -n -q policy = anhp -f value HACMPsplitmerge :rg_move_acquire[+78] typeset ANHP_ENABLED= :rg_move_acquire[+78] [[ == Yes ]] :rg_move_acquire[+87] clodmget -n -q policy = scsi -f value HACMPsplitmerge :rg_move_acquire[+87] typeset 
SCSIPR_ENABLED= :rg_move_acquire[+87] [[ == Yes ]] :rg_move_acquire[+106] (( 0 == 1 && 0 == 1 )) :rg_move_acquire[+109] (( 0 == 1 && 0 == 0 )) :rg_move_acquire[+112] (( 0 == 1 && 0 == 0 )) :rg_move_acquire[+118] clcallev rg_move epprda 1 ACQUIRE Jan 28 2023 18:06:52 EVENT START: rg_move epprda 1 ACQUIRE |2023-01-28T18:06:52|12511|EVENT START: rg_move epprda 1 ACQUIRE| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T18:06:52.512473 :clevlog[amlog_trace:320] echo '|2023-01-28T18:06:52.512473|INFO: rg_move|epprd_rg|epprda|1|ACQUIRE' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. :get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move[76] version=%I% :rg_move[86] STATUS=0 :rg_move[88] [[ ! -n '' ]] :rg_move[90] EMULATE=REAL :rg_move[96] set -u :rg_move[98] NODENAME=epprda :rg_move[98] export NODENAME :rg_move[99] RGID=1 :rg_move[100] (( 3 == 3 )) :rg_move[102] ACTION=ACQUIRE :rg_move[108] : serial number for this event is 12511 :rg_move[112] RG_UP_POSTEVENT_ON_NODE=epprda :rg_move[112] export RG_UP_POSTEVENT_ON_NODE :rg_move[116] clodmget -qid=1 -f group -n HACMPgroup :rg_move[116] eval RGNAME=epprd_rg :rg_move[1] RGNAME=epprd_rg :rg_move[118] UPDATESTATD=0 :rg_move[119] export UPDATESTATD :rg_move[123] RG_MOVE_EVENT=true :rg_move[123] export RG_MOVE_EVENT :rg_move[128] group_state='$RESGRP_epprd_rg_epprda' :rg_move[129] set +u :rg_move[130] eval print '$RESGRP_epprd_rg_epprda' :rg_move[1] print :rg_move[130] RG_MOVE_ONLINE='' :rg_move[130] export RG_MOVE_ONLINE :rg_move[131] set -u :rg_move[132] RG_MOVE_ONLINE=TMP_ERROR :rg_move[139] rm -f /tmp/.NFSSTOPPED :rg_move[140] rm -f /tmp/.RPCLOCKDSTOPPED :rg_move[147] set -a :rg_move[148] clsetenvgrp epprda rg_move epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 :rg_move[148] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[149] RC=0 :rg_move[150] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" 
SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[1] FORCEDOWN_GROUPS='' :rg_move[2] RESOURCE_GROUPS='' :rg_move[3] HOMELESS_GROUPS='' :rg_move[4] HOMELESS_FOLLOWER_GROUPS='' :rg_move[5] ERRSTATE_GROUPS='' :rg_move[6] PRINCIPAL_ACTIONS='' :rg_move[7] ASSOCIATE_ACTIONS='' :rg_move[8] AUXILLIARY_ACTIONS='' :rg_move[8] SIBLING_GROUPS='' :rg_move[9] SIBLING_NODES_BY_GROUP='' :rg_move[10] SIBLING_ACQUIRING_GROUPS='' :rg_move[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' :rg_move[12] SIBLING_RELEASING_GROUPS='' :rg_move[13] SIBLING_RELEASING_NODES_BY_GROUP='' :rg_move[151] set +a :rg_move[155] (( 0 != 0 )) :rg_move[155] [[ -z epprd_rg ]] :rg_move[164] [[ -z TRUE ]] :rg_move[241] AM_SYNC_CALLED_BY=RG_MOVE :rg_move[241] export AM_SYNC_CALLED_BY :rg_move[242] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-01-28T18:06:52.632998 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=ACQUIRE RESOURCE_GROUPS='"epprd_rg"' PRINCIPAL_ACTION='"ACQUIRE"' AUXILLIARY_ACTION='"NONE"' :process_resources[1] JOB_TYPE=ACQUIRE :process_resources[1] RESOURCE_GROUPS=epprd_rg :process_resources[1] PRINCIPAL_ACTION=ACQUIRE :process_resources[1] AUXILLIARY_ACTION=NONE :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 )) :process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[3360] [[ ACQUIRE == ONLINE ]] +epprd_rg:process_resources[3652] set_resource_group_state ACQUIRING +epprd_rg:process_resources[set_resource_group_state:82] PS4_FUNC=set_resource_group_state +epprd_rg:process_resources[set_resource_group_state:82] typeset PS4_FUNC +epprd_rg:process_resources[set_resource_group_state:83] [[ high == high ]] +epprd_rg:process_resources[set_resource_group_state:83] set -x +epprd_rg:process_resources[set_resource_group_state:84] STAT=0 +epprd_rg:process_resources[set_resource_group_state:85] new_status=ACQUIRING +epprd_rg:process_resources[set_resource_group_state:89] export GROUPNAME +epprd_rg:process_resources[set_resource_group_state:90] [[ ACQUIRING != DOWN ]] +epprd_rg:process_resources[set_resource_group_state:92] clchdaemons -d clstrmgr_scripts -t resource_locator -n epprda -o epprd_rg -v ACQUIRING +epprd_rg:process_resources[set_resource_group_state:100] : Resource Manager Updates +epprd_rg:process_resources[set_resource_group_state:105] amlog_trace '' 'acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:318] clcycle clavailability.log +epprd_rg:process_resources[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:process_resources[amlog_trace:319] cltime +epprd_rg:process_resources[amlog_trace:319] DATE=2023-01-28T18:06:52.667701 +epprd_rg:process_resources[amlog_trace:320] echo 
'|2023-01-28T18:06:52.667701|INFO: acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:process_resources[set_resource_group_state:106] cl_RMupdate acquiring epprd_rg process_resources 2023-01-28T18:06:52.691240 2023-01-28T18:06:52.695727 +epprd_rg:process_resources[set_resource_group_state:153] return 0 +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:06:52.707522 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=WPAR ACTION=ACQUIRE RESOURCE_GROUPS='"epprd_rg' '"' +epprd_rg:process_resources[1] JOB_TYPE=WPAR +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ WPAR == RELEASE ]] +epprd_rg:process_resources[3360] [[ WPAR == ONLINE ]] +epprd_rg:process_resources[3492] process_wpars ACQUIRE +epprd_rg:process_resources[process_wpars:3265] PS4_FUNC=process_wpars +epprd_rg:process_resources[process_wpars:3265] typeset PS4_FUNC +epprd_rg:process_resources[process_wpars:3266] [[ high == high ]] +epprd_rg:process_resources[process_wpars:3266] set -x +epprd_rg:process_resources[process_wpars:3267] STAT=0 +epprd_rg:process_resources[process_wpars:3268] action=ACQUIRE +epprd_rg:process_resources[process_wpars:3268] typeset action +epprd_rg:process_resources[process_wpars:3272] export GROUPNAME +epprd_rg:process_resources[process_wpars:3275] clstart_wpar +epprd_rg:clstart_wpar[180] version=1.12.1.1 +epprd_rg:clstart_wpar[184] [[ rg_move == reconfig_resource_acquire ]] +epprd_rg:clstart_wpar[184] [[ ACQUIRE_PRIMARY == reconfig_resource_acquire ]] +epprd_rg:clstart_wpar[193] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clstart_wpar[193] [[ -z '' ]] +epprd_rg:clstart_wpar[193] exit 0 +epprd_rg:process_resources[process_wpars:3276] RC=0 +epprd_rg:process_resources[process_wpars:3285] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[process_wpars:3294] return 0 +epprd_rg:process_resources[3493] RC=0 +epprd_rg:process_resources[3495] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:06:52.737789 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=SERVICE_LABELS ACTION=ACQUIRE IP_LABELS='"epprd"' RESOURCE_GROUPS='"epprd_rg' '"' COMMUNICATION_LINKS='""' +epprd_rg:process_resources[1] 
JOB_TYPE=SERVICE_LABELS +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] IP_LABELS=epprd +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] COMMUNICATION_LINKS='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ SERVICE_LABELS == RELEASE ]] +epprd_rg:process_resources[3360] [[ SERVICE_LABELS == ONLINE ]] +epprd_rg:process_resources[3407] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[3409] acquire_service_labels +epprd_rg:process_resources[acquire_service_labels:3083] PS4_FUNC=acquire_service_labels +epprd_rg:process_resources[acquire_service_labels:3083] typeset PS4_FUNC +epprd_rg:process_resources[acquire_service_labels:3084] [[ high == high ]] +epprd_rg:process_resources[acquire_service_labels:3084] set -x +epprd_rg:process_resources[acquire_service_labels:3085] STAT=0 +epprd_rg:process_resources[acquire_service_labels:3086] clcallev acquire_service_addr Jan 28 2023 18:06:52 EVENT START: acquire_service_addr |2023-01-28T18:06:52|12511|EVENT START: acquire_service_addr | +epprd_rg:acquire_service_addr[416] version=1.74.1.5 +epprd_rg:acquire_service_addr[423] [[ SERVICE_LABELS != 0 ]] +epprd_rg:acquire_service_addr[423] [[ SERVICE_LABELS != GROUP ]] +epprd_rg:acquire_service_addr[424] PROC_RES=true +epprd_rg:acquire_service_addr[440] saveNSORDER=UNDEFINED +epprd_rg:acquire_service_addr[441] NSORDER=local +epprd_rg:acquire_service_addr[442] export NSORDER +epprd_rg:acquire_service_addr[445] cl_RMupdate resource_acquiring All_service_addrs acquire_service_addr 2023-01-28T18:06:52.818968 2023-01-28T18:06:52.823233 +epprd_rg:acquire_service_addr[452] export GROUPNAME +epprd_rg:acquire_service_addr[458] [[ true == true ]] +epprd_rg:acquire_service_addr[459] get_list_head epprd +epprd_rg:acquire_service_addr[459] read SERVICELABELS +epprd_rg:acquire_service_addr[460] get_list_tail epprd +epprd_rg:acquire_service_addr[460] read IP_LABELS +epprd_rg:acquire_service_addr[471] clgetif -a epprd +epprd_rg:acquire_service_addr[471] 2> /dev/null +epprd_rg:acquire_service_addr[472] (( 3 != 0 )) +epprd_rg:acquire_service_addr[477] cllsif -J '~' -Sn epprd +epprd_rg:acquire_service_addr[477] cut -d~ -f3 +epprd_rg:acquire_service_addr[477] uniq +epprd_rg:acquire_service_addr[477] NETWORK=net_ether_01 +epprd_rg:acquire_service_addr[478] cllsif -J '~' -Si epprda +epprd_rg:acquire_service_addr[478] awk -F~ -v NET=net_ether_01 '{if ($2 == "boot" && $3 == NET) print $1}' +epprd_rg:acquire_service_addr[478] sort +epprd_rg:acquire_service_addr[478] boot_list=epprda +epprd_rg:acquire_service_addr[480] [[ -z epprda ]] +epprd_rg:acquire_service_addr[492] best_boot_addr net_ether_01 epprda +epprd_rg:acquire_service_addr[best_boot_addr:106] NETWORK=net_ether_01 +epprd_rg:acquire_service_addr[best_boot_addr:106] typeset NETWORK +epprd_rg:acquire_service_addr[best_boot_addr:107] shift +epprd_rg:acquire_service_addr[best_boot_addr:108] candidate_boots=epprda +epprd_rg:acquire_service_addr[best_boot_addr:108] typeset candidate_boots +epprd_rg:acquire_service_addr[best_boot_addr:112] echo epprda +epprd_rg:acquire_service_addr[best_boot_addr:112] tr ' ' '\n' 
+epprd_rg:acquire_service_addr[best_boot_addr:112] wc -l +epprd_rg:acquire_service_addr[best_boot_addr:112] num_candidates=' 1' +epprd_rg:acquire_service_addr[best_boot_addr:112] typeset -li num_candidates +epprd_rg:acquire_service_addr[best_boot_addr:113] (( 1 == 1 )) +epprd_rg:acquire_service_addr[best_boot_addr:114] echo epprda +epprd_rg:acquire_service_addr[best_boot_addr:115] return +epprd_rg:acquire_service_addr[492] boot_addr=epprda +epprd_rg:acquire_service_addr[493] (( 0 != 0 )) +epprd_rg:acquire_service_addr[505] clgetif -a epprda +epprd_rg:acquire_service_addr[505] 2> /dev/null +epprd_rg:acquire_service_addr[505] cut -f1 +epprd_rg:acquire_service_addr[505] INTERFACE='en0 ' +epprd_rg:acquire_service_addr[507] cllsif -J '~' -Sn epprda +epprd_rg:acquire_service_addr[507] cut -f7,9 -d~ +epprd_rg:acquire_service_addr[508] read boot_dot_addr INTERFACE +epprd_rg:acquire_service_addr[508] IFS='~' +epprd_rg:acquire_service_addr[510] [[ -z en0 ]] +epprd_rg:acquire_service_addr[527] cllsif -J '~' -Sn epprd +epprd_rg:acquire_service_addr[527] cut -f7,11,15 -d~ +epprd_rg:acquire_service_addr[527] uniq +epprd_rg:acquire_service_addr[528] read service_dot_addr NETMASK INET_FAMILY +epprd_rg:acquire_service_addr[528] IFS='~' +epprd_rg:acquire_service_addr[530] [[ AF_INET == AF_INET6 ]] +epprd_rg:acquire_service_addr[534] cl_swap_IP_address rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[462] version=1.9.14.8 +epprd_rg:cl_swap_IP_address[464] cl_get_path -S +epprd_rg:cl_swap_IP_address[464] OP_SEP='~' +epprd_rg:cl_swap_IP_address[465] LC_ALL=C +epprd_rg:cl_swap_IP_address[465] export LC_ALL +epprd_rg:cl_swap_IP_address[466] RESTORE_ROUTES=/usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[468] cl_echo 33 'Starting execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0' /usr/es/sbin/cluster/events/utils/cl_swap_IP_address 'rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0'
Jan 28 2023 18:06:52
Starting execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0
+epprd_rg:cl_swap_IP_address[470] typeset -i oslevel +epprd_rg:cl_swap_IP_address[471] /usr/bin/oslevel -r +epprd_rg:cl_swap_IP_address[471] /usr/bin/sed s/-//g +epprd_rg:cl_swap_IP_address[471] oslevel=720005 +epprd_rg:cl_swap_IP_address[476] [[ 6 == 6 ]] +epprd_rg:cl_swap_IP_address[477] [[ 6 == 7 ]] +epprd_rg:cl_swap_IP_address[484] no -a +epprd_rg:cl_swap_IP_address[484] grep ipignoreredirects +epprd_rg:cl_swap_IP_address[484] awk '{ print $3 }' +epprd_rg:cl_swap_IP_address[484] PRIOR_IPIGNORE_REDIRECTS_VALUE=0 +epprd_rg:cl_swap_IP_address[485] /usr/sbin/no -o ipignoreredirects=1 Setting ipignoreredirects to 1 +epprd_rg:cl_swap_IP_address[490] PROC_RES=false +epprd_rg:cl_swap_IP_address[491] [[ SERVICE_LABELS != 0 ]] +epprd_rg:cl_swap_IP_address[491] [[ SERVICE_LABELS != GROUP ]] +epprd_rg:cl_swap_IP_address[492] PROC_RES=true +epprd_rg:cl_swap_IP_address[495] set -u +epprd_rg:cl_swap_IP_address[497] RC=0 +epprd_rg:cl_swap_IP_address[504] netstat -in
Name  Mtu   Network     Address             Ipkts      Ierrs Opkts     Oerrs Coll
en0   1500  link#2      fa.e6.13.4e.a9.20   183751141  0     60763439  0     0
en0   1500  61.81.244   61.81.244.134       183751141  0     60763439  0     0
lo0   16896 link#1                          34273755   0     34273755  0     0
lo0   16896 127         127.0.0.1           34273755   0     34273755  0     0
lo0   16896 ::1%1                           34273755   0     34273755  0     0
+epprd_rg:cl_swap_IP_address[505] netstat -rnC
Routing tables
Destination      Gateway        Flags  Wt  Policy  If   Cost  Config_Cost

Route tree for Protocol Family 2 (Internet):
default          61.81.244.1    UG     1   -       en0  0     0
61.81.244.0      61.81.244.134  UHSb   1   -       en0  0     0  =>
61.81.244/24     61.81.244.134  U      1   -       en0  0     0
61.81.244.134    127.0.0.1      UGHS   1   -       lo0  0     0
61.81.244.255    61.81.244.134  UHSb   1   -       en0  0     0
127/8            127.0.0.1      U      1   -       lo0  0     0

Route tree for Protocol Family 24 (Internet v6):
::1%1            ::1%1          UH     1   -       lo0  0     0
+epprd_rg:cl_swap_IP_address[506] CASC_OR_ROT=rotating +epprd_rg:cl_swap_IP_address[507] ACQ_OR_RLSE=acquire +epprd_rg:cl_swap_IP_address[508] IF=en0 +epprd_rg:cl_swap_IP_address[509] ADDR=61.81.244.156 +epprd_rg:cl_swap_IP_address[510] OLD_ADDR=61.81.244.134 +epprd_rg:cl_swap_IP_address[511] NETMASK=255.255.255.0 +epprd_rg:cl_swap_IP_address[514] [[ rotating == cascading ]] +epprd_rg:cl_swap_IP_address[525] cut -f3 -d~ +epprd_rg:cl_swap_IP_address[525] cllsif -J '~' -Sw -n 61.81.244.156 +epprd_rg:cl_swap_IP_address[525] NET=net_ether_01 +epprd_rg:cl_swap_IP_address[528] clodmget -qidentifier=61.81.244.156 -f max_aliases -n HACMPadapter +epprd_rg:cl_swap_IP_address[528] ALIAS_FIRST=0 +epprd_rg:cl_swap_IP_address[529] ifconfig en0 +epprd_rg:cl_swap_IP_address[529] LC_ALL=C +epprd_rg:cl_swap_IP_address[529] grep -c -w inet +epprd_rg:cl_swap_IP_address[529] NUM_ADDRS=1 +epprd_rg:cl_swap_IP_address[530] [[ acquire == acquire ]] +epprd_rg:cl_swap_IP_address[533] amlog_trace '' 'Aliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_swap_IP_address[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_swap_IP_address[amlog_trace:319] cltime +epprd_rg:cl_swap_IP_address[amlog_trace:319] DATE=2023-01-28T18:06:53.088344 +epprd_rg:cl_swap_IP_address[amlog_trace:320] echo '|2023-01-28T18:06:53.088344|INFO: Aliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_swap_IP_address[535] cl_echo 7310 'cl_swap_IP_address: Configuring network interface en0 with aliased IP address 61.81.244.156' cl_swap_IP_address en0 61.81.244.156
Jan 28 2023 18:06:53
cl_swap_IP_address: Configuring network interface en0 with aliased IP address 61.81.244.156
+epprd_rg:cl_swap_IP_address[546] (( 1 > 1 )) +epprd_rg:cl_swap_IP_address[550] clifconfig en0 alias 61.81.244.156 netmask 255.255.255.0 firstalias +epprd_rg:clifconfig[117] version=1.9 +epprd_rg:clifconfig[121] set -A args en0 alias 61.81.244.156 netmask 255.255.255.0 firstalias +epprd_rg:clifconfig[124] interface=en0 +epprd_rg:clifconfig[125] shift +epprd_rg:clifconfig[127] [[ -n alias ]] +epprd_rg:clifconfig[129] alias_val=1 +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n 61.81.244.156 ]] +epprd_rg:clifconfig[147] params=' address=61.81.244.156' +epprd_rg:clifconfig[147] addr=61.81.244.156 +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n netmask ]] +epprd_rg:clifconfig[149] params=' address=61.81.244.156 netmask=255.255.255.0' +epprd_rg:clifconfig[149] shift +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n firstalias ]] +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n '' ]] +epprd_rg:clifconfig[174] [[ -n 1 ]] +epprd_rg:clifconfig[174] [[ -n epprd_rg ]] +epprd_rg:clifconfig[175] clwparname epprd_rg +epprd_rg:clwparname[38] version=1.3.1.1 +epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clwparname[44] [[ -z '' ]] +epprd_rg:clwparname[44] exit 0 +epprd_rg:clifconfig[175] WPARNAME=''
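[Note] clifconfig, traced above, is a thin wrapper: it parses the alias/netmask arguments, checks whether the address belongs to an active WPAR, and ends in the plain ifconfig call visible just below. With IP aliasing, takeover adds the service address next to the boot address instead of replacing it, and the AIX firstalias option makes the new alias the interface's primary address. The underlying operations, using the addresses from this log:

    # add the service IP as the primary alias on en0
    ifconfig en0 alias 61.81.244.156 netmask 255.255.255.0 firstalias
    # confirm it took, much as check_alias_status does further down
    ifconfig en0 | grep -w 61.81.244.156
    # release would remove only the alias: ifconfig en0 delete 61.81.244.156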
+epprd_rg:clifconfig[176] (( 0 == 0 )) +epprd_rg:clifconfig[176] [[ -n '' ]] +epprd_rg:clifconfig[218] belongs_to_an_active_wpar 61.81.244.156 +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]] +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1 +epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast +epprd_rg:clifconfig[218] IFS='~' +epprd_rg:clifconfig[219] rc=1 +epprd_rg:clifconfig[221] [[ 1 == 0 ]] +epprd_rg:clifconfig[275] ifconfig en0 alias 61.81.244.156 netmask 255.255.255.0 firstalias +epprd_rg:cl_swap_IP_address[584] hats_adapter_notify en0 -e 61.81.244.156 alias 2023-01-28T18:06:53.141574 hats_adapter_notify 2023-01-28T18:06:53.145487 hats_adapter_notify +epprd_rg:cl_swap_IP_address[587] check_alias_status en0 61.81.244.156 acquire +epprd_rg:cl_swap_IP_address[check_alias_status:108] CH_INTERFACE=en0 +epprd_rg:cl_swap_IP_address[check_alias_status:109] CH_ADDRESS=61.81.244.156 +epprd_rg:cl_swap_IP_address[check_alias_status:110] CH_ACQ_OR_RLSE=acquire +epprd_rg:cl_swap_IP_address[check_alias_status:118] IF_IB=en0 +epprd_rg:cl_swap_IP_address[check_alias_status:120] echo en0 +epprd_rg:cl_swap_IP_address[check_alias_status:120] awk '{print index($0, "ib")}' +epprd_rg:cl_swap_IP_address[check_alias_status:120] IS_IB=0 +epprd_rg:cl_swap_IP_address[check_alias_status:122] [[ 0 != 1 ]] +epprd_rg:cl_swap_IP_address[check_alias_status:124] clifconfig en0 +epprd_rg:cl_swap_IP_address[check_alias_status:124] fgrep -w 61.81.244.156 +epprd_rg:cl_swap_IP_address[check_alias_status:124] awk '{print $2}' +epprd_rg:clifconfig[117] version=1.9 +epprd_rg:clifconfig[121] set -A args en0 +epprd_rg:clifconfig[124] interface=en0 +epprd_rg:clifconfig[125] shift +epprd_rg:clifconfig[127] [[ -n '' ]] +epprd_rg:clifconfig[174] [[ -n '' ]] +epprd_rg:clifconfig[218] belongs_to_an_active_wpar +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]] +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1 +epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast +epprd_rg:clifconfig[218] IFS='~' +epprd_rg:clifconfig[219] rc=1 +epprd_rg:clifconfig[221] [[ 1 == 0 ]] +epprd_rg:clifconfig[275] ifconfig en0 +epprd_rg:cl_swap_IP_address[check_alias_status:124] ADDR=61.81.244.156 +epprd_rg:cl_swap_IP_address[check_alias_status:129] [ acquire = acquire ] +epprd_rg:cl_swap_IP_address[check_alias_status:133] [[ 61.81.244.156 != 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[check_alias_status:144] return 0 +epprd_rg:cl_swap_IP_address[588] RC=0 +epprd_rg:cl_swap_IP_address[590] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[594] amlog_trace '' 'Aliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_swap_IP_address[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_swap_IP_address[amlog_trace:319] cltime +epprd_rg:cl_swap_IP_address[amlog_trace:319] DATE=2023-01-28T18:06:53.212367 +epprd_rg:cl_swap_IP_address[amlog_trace:320] echo '|2023-01-28T18:06:53.212367|INFO: Aliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_swap_IP_address[701] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[714] flush_arp +epprd_rg:cl_swap_IP_address[flush_arp:49] arp -an +epprd_rg:cl_swap_IP_address[flush_arp:49] grep '\?' 
+epprd_rg:cl_swap_IP_address[flush_arp:49] tr -d '()' +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.27 61.81.244.27 (61.81.244.27) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.218 61.81.244.218 (61.81.244.218) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.220 61.81.244.220 (61.81.244.220) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.239 61.81.244.239 (61.81.244.239) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.123 61.81.244.123 (61.81.244.123) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.146 61.81.244.146 (61.81.244.146) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.1 61.81.244.1 (61.81.244.1) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.156 61.81.244.156 (61.81.244.156) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:52] return 0 +epprd_rg:cl_swap_IP_address[716] netstat -in
Name  Mtu   Network     Address             Ipkts      Ierrs Opkts     Oerrs Coll
en0   1500  link#2      fa.e6.13.4e.a9.20   183751231  0     60763566  0     0
en0   1500  61.81.244   61.81.244.156       183751231  0     60763566  0     0
en0   1500  61.81.244   61.81.244.134       183751231  0     60763566  0     0
lo0   16896 link#1                          34273767   0     34273767  0     0
lo0   16896 127         127.0.0.1           34273767   0     34273767  0     0
lo0   16896 ::1%1                           34273767   0     34273767  0     0
+epprd_rg:cl_swap_IP_address[717] netstat -rnC
Routing tables
Destination      Gateway        Flags  Wt  Policy  If   Cost  Config_Cost

Route tree for Protocol Family 2 (Internet):
default          61.81.244.1    UG     1   -       en0  0     0
61.81.244.0      61.81.244.156  UHSb   1   -       en0  0     0  =>
61.81.244/24     61.81.244.156  U      1   -       en0  0     0
61.81.244.134    127.0.0.1      UGHS   1   -       lo0  0     0
61.81.244.156    127.0.0.1      UGHS   1   -       lo0  0     0
61.81.244.255    61.81.244.156  UHSb   1   -       en0  0     0
127/8            127.0.0.1      U      1   -       lo0  0     0

Route tree for Protocol Family 24 (Internet v6):
::1%1            ::1%1          UH     1   -       lo0  0     0
+epprd_rg:cl_swap_IP_address[989] no -o ipignoreredirects=0 Setting ipignoreredirects to 0 +epprd_rg:cl_swap_IP_address[992] cl_echo 32 'Completed execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0. Exit status = 0' /usr/es/sbin/cluster/events/utils/cl_swap_IP_address 'rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0' 0
Jan 28 2023 18:06:53
Completed execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0.
Exit status = 0
+epprd_rg:cl_swap_IP_address[994] date Sat Jan 28 18:06:53 KORST 2023 +epprd_rg:cl_swap_IP_address[996] exit 0 +epprd_rg:acquire_service_addr[537] RC=0 +epprd_rg:acquire_service_addr[539] (( 0 != 0 )) +epprd_rg:acquire_service_addr[549] [[ true == false ]] +epprd_rg:acquire_service_addr[560] cl_RMupdate resource_up All_nonerror_service_addrs acquire_service_addr 2023-01-28T18:06:53.292667 2023-01-28T18:06:53.296955 +epprd_rg:acquire_service_addr[565] [[ UNDEFINED != UNDEFINED ]] +epprd_rg:acquire_service_addr[568] NSORDER='' +epprd_rg:acquire_service_addr[568] export NSORDER +epprd_rg:acquire_service_addr[571] [[ true == false ]] +epprd_rg:acquire_service_addr[579] exit 0 Jan 28 2023 18:06:53 EVENT COMPLETED: acquire_service_addr 0 |2023-01-28T18:06:53|12511|EVENT COMPLETED: acquire_service_addr 0| +epprd_rg:process_resources[acquire_service_labels:3087] RC=0 +epprd_rg:process_resources[acquire_service_labels:3089] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[acquire_service_labels:3104] (( 0 != 0 )) +epprd_rg:process_resources[acquire_service_labels:3110] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. +epprd_rg:process_resources[acquire_service_labels:3112] return 0 +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:06:53.375856 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=DISKS ACTION=ACQUIRE HDISKS='"hdisk2,hdisk3,hdisk4,hdisk5,hdisk6,hdisk7,hdisk8"' RESOURCE_GROUPS='"epprd_rg' '"' VOLUME_GROUPS='"datavg,datavg,datavg,datavg,datavg,datavg,datavg"' +epprd_rg:process_resources[1] JOB_TYPE=DISKS +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] HDISKS=hdisk2,hdisk3,hdisk4,hdisk5,hdisk6,hdisk7,hdisk8 +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] VOLUME_GROUPS=datavg,datavg,datavg,datavg,datavg,datavg,datavg +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ DISKS == RELEASE ]] +epprd_rg:process_resources[3360] [[ DISKS == ONLINE ]] +epprd_rg:process_resources[3439] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[3441] FAILED_RR_RGS='' +epprd_rg:process_resources[3442] get_disks_main +epprd_rg:process_resources[get_disks_main:981] PS4_FUNC=get_disks_main +epprd_rg:process_resources[get_disks_main:981] typeset PS4_FUNC +epprd_rg:process_resources[get_disks_main:982] [[ high == high ]] +epprd_rg:process_resources[get_disks_main:982] set -x +epprd_rg:process_resources[get_disks_main:983] SKIPBRKRES=0 +epprd_rg:process_resources[get_disks_main:983] typeset -li SKIPBRKRES +epprd_rg:process_resources[get_disks_main:984] STAT=0 +epprd_rg:process_resources[get_disks_main:985] FAILURE_IN_METHOD=0 +epprd_rg:process_resources[get_disks_main:985] typeset -li FAILURE_IN_METHOD
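[Note] The shape of the process_resources main loop is visible again here: each pass calls clRGPA, evals the JOB_TYPE=... string it returns under set -a, and dispatches on the job type, SERVICE_LABELS above, DISKS here, VGS next, until clRGPA reports nothing left. A reduced sketch of that loop (assuming, as the trace shows, that clrgpa prints one job description per call):

    # simplified dispatch loop in the style of process_resources
    while true; do
        set -a
        eval "$(clrgpa)"     # e.g. JOB_TYPE=DISKS ACTION=ACQUIRE HDISKS=...
        set +a
        case $JOB_TYPE in
            NONE)           break ;;   # no more work for this event
            SERVICE_LABELS) : acquire or release service IP labels ;;
            DISKS)          : make the listed hdisks available ;;
            VGS)            : vary on the listed volume groups ;;
        esac
    done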
+epprd_rg:process_resources[get_disks_main:986] LIST_OF_FAILED_RGS='' +epprd_rg:process_resources[get_disks_main:989] : Below are the list of resources as generated by clrgpa +epprd_rg:process_resources[get_disks_main:991] RG_LIST=epprd_rg +epprd_rg:process_resources[get_disks_main:992] RDISK_LIST='' +epprd_rg:process_resources[get_disks_main:993] DISK_LIST=hdisk2,hdisk3,hdisk4,hdisk5,hdisk6,hdisk7,hdisk8 +epprd_rg:process_resources[get_disks_main:994] VG_LIST=datavg,datavg,datavg,datavg,datavg,datavg,datavg +epprd_rg:process_resources[get_disks_main:997] : Resource groups are processed individually. This is required because +epprd_rg:process_resources[get_disks_main:998] : the replication mechanism may differ between resource groups. +epprd_rg:process_resources[get_disks_main:1002] getReplicatedResources epprd_rg +epprd_rg:process_resources[getReplicatedResources:699] PS4_FUNC=getReplicatedResources +epprd_rg:process_resources[getReplicatedResources:699] typeset PS4_FUNC +epprd_rg:process_resources[getReplicatedResources:700] [[ high == high ]] +epprd_rg:process_resources[getReplicatedResources:700] set -x +epprd_rg:process_resources[getReplicatedResources:702] RV=false +epprd_rg:process_resources[getReplicatedResources:704] clodmget -n -f type HACMPrresmethods +epprd_rg:process_resources[getReplicatedResources:704] [[ -n 9 ]] +epprd_rg:process_resources[getReplicatedResources:707] : Replicated resource methods are defined, check for resources +epprd_rg:process_resources[getReplicatedResources:709] clodmget -q $'name like \'*_REP_RESOURCE\' AND group=epprd_rg' -f value -n HACMPresource +epprd_rg:process_resources[getReplicatedResources:709] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:718] : Verify if any backup profiles are configured and trigger cbm utilities based on that +epprd_rg:process_resources[getReplicatedResources:720] clodmget -q name=BACKUP_ENABLED -f value HACMPresource +epprd_rg:process_resources[getReplicatedResources:720] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:739] echo false +epprd_rg:process_resources[get_disks_main:1002] REPLICATED_RESOURCES=false +epprd_rg:process_resources[get_disks_main:1005] : Break out the resources for resource group epprd_rg +epprd_rg:process_resources[get_disks_main:1007] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[get_disks_main:1008] VOLUME_GROUPS='' +epprd_rg:process_resources[get_disks_main:1009] HDISKS='' +epprd_rg:process_resources[get_disks_main:1010] RHDISKS='' +epprd_rg:process_resources[get_disks_main:1011] RDISK_LIST='' +epprd_rg:process_resources[get_disks_main:1014] : Get the volume groups in resource group epprd_rg +epprd_rg:process_resources[get_disks_main:1016] print datavg,datavg,datavg,datavg,datavg,datavg,datavg +epprd_rg:process_resources[get_disks_main:1016] read VOLUME_GROUPS VG_LIST +epprd_rg:process_resources[get_disks_main:1016] IFS=: +epprd_rg:process_resources[get_disks_main:1018] : Removing duplicate entries in VG list. 
+epprd_rg:process_resources[get_disks_main:1020] echo datavg,datavg,datavg,datavg,datavg,datavg,datavg +epprd_rg:process_resources[get_disks_main:1020] tr , '\n' +epprd_rg:process_resources[get_disks_main:1020] sort -u +epprd_rg:process_resources[get_disks_main:1020] xargs +epprd_rg:process_resources[get_disks_main:1020] VOLUME_GROUPS=datavg +epprd_rg:process_resources[get_disks_main:1022] : Get the disks corresponding to these volume groups +epprd_rg:process_resources[get_disks_main:1024] print hdisk2,hdisk3,hdisk4,hdisk5,hdisk6,hdisk7,hdisk8 +epprd_rg:process_resources[get_disks_main:1024] read HDISKS DISK_LIST +epprd_rg:process_resources[get_disks_main:1024] IFS=: +epprd_rg:process_resources[get_disks_main:1025] HDISKS='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8' +epprd_rg:process_resources[get_disks_main:1031] : Pick up any raw disks not returned by clrgpa +epprd_rg:process_resources[get_disks_main:1033] clodmget -q group='epprd_rg AND name=RAW_DISK' HACMPresource +epprd_rg:process_resources[get_disks_main:1033] [[ -n '' ]] +epprd_rg:process_resources[get_disks_main:1042] : Get any raw disks in resource group epprd_rg +epprd_rg:process_resources[get_disks_main:1045] print +epprd_rg:process_resources[get_disks_main:1045] read RHDISKS RDISK_LIST +epprd_rg:process_resources[get_disks_main:1045] IFS=: +epprd_rg:process_resources[get_disks_main:1046] RHDISKS='' +epprd_rg:process_resources[get_disks_main:1047] print datavg +epprd_rg:process_resources[get_disks_main:1047] read VOLUME_GROUPS +epprd_rg:process_resources[get_disks_main:1051] : At this point, the global variables below should be set to +epprd_rg:process_resources[get_disks_main:1052] : the values associated with resource group epprd_rg +epprd_rg:process_resources[get_disks_main:1054] export RESOURCE_GROUPS +epprd_rg:process_resources[get_disks_main:1055] export VOLUME_GROUPS +epprd_rg:process_resources[get_disks_main:1056] export HDISKS +epprd_rg:process_resources[get_disks_main:1057] export RHDISKS +epprd_rg:process_resources[get_disks_main:1059] [[ false == true ]] +epprd_rg:process_resources[get_disks_main:1182] get_disks +epprd_rg:process_resources[get_disks:1198] PS4_FUNC=get_disks +epprd_rg:process_resources[get_disks:1198] typeset PS4_FUNC +epprd_rg:process_resources[get_disks:1199] [[ high == high ]] +epprd_rg:process_resources[get_disks:1199] set -x +epprd_rg:process_resources[get_disks:1201] STAT=0 +epprd_rg:process_resources[get_disks:1204] : Most volume groups are Enhanced Concurrent Mode, and it should +epprd_rg:process_resources[get_disks:1205] : not be necessary to break reserves. If all the volume groups +epprd_rg:process_resources[get_disks:1206] : are ECM, we should be able to skip breaking reserves. If it +epprd_rg:process_resources[get_disks:1207] : turns out that there is a reserve on a disk in an ECM volume +epprd_rg:process_resources[get_disks:1208] : group, that will be handled by cl_pvo making an explicit call +epprd_rg:process_resources[get_disks:1209] : to cl_disk_available. 
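[Note] get_disks skips reserve-breaking when every volume group is Enhanced Concurrent Mode; the test that follows reads each VG's conc_capable attribute from CuAt. The same check with stock ODM tools (clodmget is the PowerHA helper; odmget plus sed is the generic equivalent):

    # is datavg concurrent-capable (ECM) according to the ODM?
    vg=datavg
    cc=$(odmget -q "name = $vg and attribute = conc_capable" CuAt |
        sed -n 's/.*value = "\(.*\)".*/\1/p')
    [[ $cc == y ]] && print "$vg is ECM, no reserves to break"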
+epprd_rg:process_resources[get_disks:1213] all_ecm=TRUE +epprd_rg:process_resources[get_disks:1214] IFS=: +epprd_rg:process_resources[get_disks:1214] set -- datavg +epprd_rg:process_resources[get_disks:1214] print datavg +epprd_rg:process_resources[get_disks:1216] print datavg +epprd_rg:process_resources[get_disks:1216] sort -u +epprd_rg:process_resources[get_disks:1216] tr , '\n' +epprd_rg:process_resources[get_disks:1218] clodmget -q 'name = datavg and attribute = conc_capable' -f value -n CuAt +epprd_rg:process_resources[get_disks:1218] [[ y != y ]] +epprd_rg:process_resources[get_disks:1224] [[ TRUE == FALSE ]] +epprd_rg:process_resources[get_disks:1226] [[ TRUE == TRUE ]] +epprd_rg:process_resources[get_disks:1226] return 0 +epprd_rg:process_resources[get_disks_main:1183] STAT=0 +epprd_rg:process_resources[get_disks_main:1186] return 0 +epprd_rg:process_resources[3443] tr ' ' '\n' +epprd_rg:process_resources[3443] echo +epprd_rg:process_resources[3443] FAILED_RR_RGS='' +epprd_rg:process_resources[3444] [[ -n '' ]] +epprd_rg:process_resources[3450] clodmget -n -q policy=scsi -f value HACMPsplitmerge +epprd_rg:process_resources[3450] SCSIPR_ENABLED='' +epprd_rg:process_resources[3450] typeset SCSIPR_ENABLED +epprd_rg:process_resources[3451] [[ '' == Yes ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:06:53.451682 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=VGS ACTION=ACQUIRE CONCURRENT_VOLUME_GROUP='""' VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' EXPORT_FILESYSTEM='""' +epprd_rg:process_resources[1] JOB_TYPE=VGS +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] CONCURRENT_VOLUME_GROUP='' +epprd_rg:process_resources[1] VOLUME_GROUPS=datavg +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] EXPORT_FILESYSTEM='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ VGS == RELEASE ]] +epprd_rg:process_resources[3360] [[ VGS == ONLINE ]] +epprd_rg:process_resources[3571] process_volume_groups_main ACQUIRE +epprd_rg:process_resources[process_volume_groups_main:2293] PS4_FUNC=process_volume_groups_main +epprd_rg:process_resources[process_volume_groups_main:2293] typeset PS4_FUNC +epprd_rg:process_resources[process_volume_groups_main:2294] [[ high == high ]] +epprd_rg:process_resources[process_volume_groups_main:2294] set -x +epprd_rg:process_resources[process_volume_groups_main:2295] DEF_VARYON_ACTION=0 +epprd_rg:process_resources[process_volume_groups_main:2295] typeset -li DEF_VARYON_ACTION +epprd_rg:process_resources[process_volume_groups_main:2296] FAILURE_IN_METHOD=0 +epprd_rg:process_resources[process_volume_groups_main:2296] typeset -li FAILURE_IN_METHOD +epprd_rg:process_resources[process_volume_groups_main:2297] 
ACTION=ACQUIRE +epprd_rg:process_resources[process_volume_groups_main:2297] typeset ACTION +epprd_rg:process_resources[process_volume_groups_main:2298] STAT=0 +epprd_rg:process_resources[process_volume_groups_main:2299] VG_LIST=datavg +epprd_rg:process_resources[process_volume_groups_main:2300] RG_LIST=epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2304] getReplicatedResources epprd_rg +epprd_rg:process_resources[getReplicatedResources:699] PS4_FUNC=getReplicatedResources +epprd_rg:process_resources[getReplicatedResources:699] typeset PS4_FUNC +epprd_rg:process_resources[getReplicatedResources:700] [[ high == high ]] +epprd_rg:process_resources[getReplicatedResources:700] set -x +epprd_rg:process_resources[getReplicatedResources:702] RV=false +epprd_rg:process_resources[getReplicatedResources:704] clodmget -n -f type HACMPrresmethods +epprd_rg:process_resources[getReplicatedResources:704] [[ -n 9 ]] +epprd_rg:process_resources[getReplicatedResources:707] : Replicated resource methods are defined, check for resources +epprd_rg:process_resources[getReplicatedResources:709] clodmget -q $'name like \'*_REP_RESOURCE\' AND group=epprd_rg' -f value -n HACMPresource +epprd_rg:process_resources[getReplicatedResources:709] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:718] : Verify if any backup profiles are configured and trigger cbm utilities based on that +epprd_rg:process_resources[getReplicatedResources:720] clodmget -q name=BACKUP_ENABLED -f value HACMPresource +epprd_rg:process_resources[getReplicatedResources:720] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:739] echo false +epprd_rg:process_resources[process_volume_groups_main:2304] REPLICATED_RESOURCES=false +epprd_rg:process_resources[process_volume_groups_main:2305] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2306] print -- datavg +epprd_rg:process_resources[process_volume_groups_main:2306] read VOLUME_GROUPS VG_LIST +epprd_rg:process_resources[process_volume_groups_main:2306] IFS=: +epprd_rg:process_resources[process_volume_groups_main:2307] VOLUME_GROUPS=datavg +epprd_rg:process_resources[process_volume_groups_main:2310] : At this point, these variables contain information only for epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2312] export VOLUME_GROUPS +epprd_rg:process_resources[process_volume_groups_main:2313] export RESOURCE_GROUPS +epprd_rg:process_resources[process_volume_groups_main:2315] [[ false == true ]] +epprd_rg:process_resources[process_volume_groups_main:2555] process_volume_groups ACQUIRE +epprd_rg:process_resources[process_volume_groups:2571] PS4_FUNC=process_volume_groups +epprd_rg:process_resources[process_volume_groups:2571] typeset PS4_FUNC +epprd_rg:process_resources[process_volume_groups:2572] [[ high == high ]] +epprd_rg:process_resources[process_volume_groups:2572] set -x +epprd_rg:process_resources[process_volume_groups:2573] STAT=0 +epprd_rg:process_resources[process_volume_groups:2575] GROUPNAME=epprd_rg +epprd_rg:process_resources[process_volume_groups:2575] export GROUPNAME +epprd_rg:process_resources[process_volume_groups:2578] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[process_volume_groups:2581] : Varyon the VGs in the environment +epprd_rg:process_resources[process_volume_groups:2583] cl_activate_vgs -n +epprd_rg:cl_activate_vgs[213] [[ high == high ]] +epprd_rg:cl_activate_vgs[213] version=1.46 +epprd_rg:cl_activate_vgs[215] STATUS=0 +epprd_rg:cl_activate_vgs[215] typeset -li STATUS 
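[Note] cl_activate_vgs, entered here with -n (skip sync), varies on each volume group of the resource group through clvaryonvg. Because clrgpa reports the owning VG once per member disk, the list arrives as datavg,datavg,...,datavg, and both this script and get_disks_main above collapse it with the same pipeline:

    # reduce "datavg,datavg,datavg" to unique, space-separated VG names
    print datavg,datavg,datavg | tr , '\n' | sort -u | xargs
    # prints: datavg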
+epprd_rg:cl_activate_vgs[216] SYNCFLAG='' +epprd_rg:cl_activate_vgs[217] CLENV='' +epprd_rg:cl_activate_vgs[218] TMP_FILENAME=/tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs[219] USE_OEM_METHODS=false +epprd_rg:cl_activate_vgs[221] PROC_RES=false +epprd_rg:cl_activate_vgs[225] [[ VGS != 0 ]] +epprd_rg:cl_activate_vgs[225] [[ VGS != GROUP ]] +epprd_rg:cl_activate_vgs[226] PROC_RES=true +epprd_rg:cl_activate_vgs[232] [[ -n == -n ]] +epprd_rg:cl_activate_vgs[234] SYNCFLAG=-n +epprd_rg:cl_activate_vgs[235] shift +epprd_rg:cl_activate_vgs[240] (( 0 != 0 )) +epprd_rg:cl_activate_vgs[247] set -u +epprd_rg:cl_activate_vgs[250] rm -f /tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs[254] lsvg -L -o +epprd_rg:cl_activate_vgs[254] print caavg_private rootvg +epprd_rg:cl_activate_vgs[254] VGSTATUS='caavg_private rootvg' +epprd_rg:cl_activate_vgs[257] ALLVGS=All_volume_groups +epprd_rg:cl_activate_vgs[258] cl_RMupdate resource_acquiring All_volume_groups cl_activate_vgs 2023-01-28T18:06:53.524062 2023-01-28T18:06:53.528551 +epprd_rg:cl_activate_vgs[262] [[ true == false ]] +epprd_rg:cl_activate_vgs[285] LIST_OF_VOLUME_GROUPS_FOR_RG='' +epprd_rg:cl_activate_vgs[289] export GROUPNAME +epprd_rg:cl_activate_vgs[291] echo datavg +epprd_rg:cl_activate_vgs[291] read LIST_OF_VOLUME_GROUPS_FOR_RG VOLUME_GROUPS +epprd_rg:cl_activate_vgs[291] IFS=: +epprd_rg:cl_activate_vgs[294] echo datavg +epprd_rg:cl_activate_vgs[296] sort -u +epprd_rg:cl_activate_vgs[295] tr , '\n' +epprd_rg:cl_activate_vgs[294] LIST_OF_VOLUME_GROUPS_FOR_RG=datavg +epprd_rg:cl_activate_vgs[298] vgs_list datavg +epprd_rg:cl_activate_vgs[vgs_list:178] PS4_LOOP='' +epprd_rg:cl_activate_vgs[vgs_list:178] typeset PS4_LOOP +epprd_rg:cl_activate_vgs:datavg[vgs_list:182] PS4_LOOP=datavg +epprd_rg:cl_activate_vgs:datavg[vgs_list:186] [[ 'caavg_private rootvg' == @(?(*\ )datavg?(\ *)) ]] +epprd_rg:cl_activate_vgs:datavg[vgs_list:192] : call varyon for the volume group in Foreground +epprd_rg:cl_activate_vgs:datavg[vgs_list:194] vgs_chk datavg -n cl_activate_vgs +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:78] VG=datavg +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:78] typeset VG +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:79] SYNCFLAG=-n +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:79] typeset SYNCFLAG +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:80] PROGNAME=cl_activate_vgs +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:80] typeset PROGNAME +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:81] STATUS=0 +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:81] typeset -li STATUS +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:83] [[ -n '' ]] +epprd_rg:cl_activate_vgs(0.052):datavg[vgs_chk:100] amlog_trace '' 'Activating Volume Group|datavg' +epprd_rg:cl_activate_vgs(0.052):datavg[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_vgs(0.053):datavg[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_vgs(0.078):datavg[amlog_trace:319] cltime +epprd_rg:cl_activate_vgs(0.080):datavg[amlog_trace:319] DATE=2023-01-28T18:06:53.565657 +epprd_rg:cl_activate_vgs(0.080):datavg[amlog_trace:320] echo '|2023-01-28T18:06:53.565657|INFO: Activating Volume Group|datavg' +epprd_rg:cl_activate_vgs(0.080):datavg[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_vgs(0.080):datavg[vgs_chk:102] typeset -x ERRMSG +epprd_rg:cl_activate_vgs(0.080):datavg[vgs_chk:103] clvaryonvg -n datavg +epprd_rg:clvaryonvg(0.009):datavg[985] version=1.21.7.22 +epprd_rg:clvaryonvg(0.009):datavg[989] : Without this test, cause of 
failure due to non-root may not be obvious +epprd_rg:clvaryonvg(0.009):datavg[991] [[ -z '' ]] +epprd_rg:clvaryonvg(0.009):datavg[991] id -nu +epprd_rg:clvaryonvg(0.010):datavg[991] 2> /dev/null +epprd_rg:clvaryonvg(0.012):datavg[991] user_name=root +epprd_rg:clvaryonvg(0.012):datavg[994] : Check if RBAC is enabled +epprd_rg:clvaryonvg(0.012):datavg[996] is_rbac_enabled='' +epprd_rg:clvaryonvg(0.012):datavg[996] typeset is_rbac_enabled +epprd_rg:clvaryonvg(0.012):datavg[997] clodmget -nq group='LDAPClient and name=RBACConfig' -f value HACMPLDAP +epprd_rg:clvaryonvg(0.013):datavg[997] 2> /dev/null +epprd_rg:clvaryonvg(0.016):datavg[997] is_rbac_enabled='' +epprd_rg:clvaryonvg(0.016):datavg[999] role='' +epprd_rg:clvaryonvg(0.016):datavg[999] typeset role +epprd_rg:clvaryonvg(0.016):datavg[1000] [[ root != root ]] +epprd_rg:clvaryonvg(0.016):datavg[1009] LEAVEOFF=FALSE +epprd_rg:clvaryonvg(0.016):datavg[1010] FORCEON='' +epprd_rg:clvaryonvg(0.016):datavg[1011] FORCEUPD=FALSE +epprd_rg:clvaryonvg(0.016):datavg[1012] NOQUORUM=20 +epprd_rg:clvaryonvg(0.016):datavg[1013] MISSING_UPDATES=30 +epprd_rg:clvaryonvg(0.016):datavg[1014] DATA_DIVERGENCE=31 +epprd_rg:clvaryonvg(0.016):datavg[1015] ARGS='' +epprd_rg:clvaryonvg(0.016):datavg[1016] typeset -li varyonvg_rc +epprd_rg:clvaryonvg(0.016):datavg[1017] typeset -li MAXLVS +epprd_rg:clvaryonvg(0.016):datavg[1018] ENODEV=19 +epprd_rg:clvaryonvg(0.016):datavg[1018] typeset -li ENODEV +epprd_rg:clvaryonvg(0.016):datavg[1020] set -u +epprd_rg:clvaryonvg(0.016):datavg[1022] /bin/dspmsg -s 2 cspoc.cat 31 'usage: clvaryonvg [-F] [-f] [-n] [-p] [-s] [-o] \n' +epprd_rg:clvaryonvg(0.018):datavg[1022] USAGE='usage: clvaryonvg [-F] [-f] [-n] [-p] [-s] [-o] ' +epprd_rg:clvaryonvg(0.018):datavg[1023] (( 2 < 1 )) +epprd_rg:clvaryonvg(0.018):datavg[1029] : Parse the options +epprd_rg:clvaryonvg(0.018):datavg[1031] S_FLAG='' +epprd_rg:clvaryonvg(0.018):datavg[1032] P_FLAG='' +epprd_rg:clvaryonvg(0.018):datavg[1033] getopts :Ffnops option +epprd_rg:clvaryonvg(0.019):datavg[1038] : -n Always applied, retained for compatibility +epprd_rg:clvaryonvg(0.019):datavg[1033] getopts :Ffnops option +epprd_rg:clvaryonvg(0.019):datavg[1048] : Pick up the volume group name, which follows the options +epprd_rg:clvaryonvg(0.019):datavg[1050] shift 1 +epprd_rg:clvaryonvg(0.019):datavg[1051] VG=datavg +epprd_rg:clvaryonvg(0.019):datavg[1054] : Set up filenames we will be using +epprd_rg:clvaryonvg(0.019):datavg[1056] VGDIR=/usr/es/sbin/cluster/etc/vg/ +epprd_rg:clvaryonvg(0.019):datavg[1057] TSFILE=/usr/es/sbin/cluster/etc/vg/datavg.tstamp +epprd_rg:clvaryonvg(0.019):datavg[1058] DSFILE=/usr/es/sbin/cluster/etc/vg/datavg.desc +epprd_rg:clvaryonvg(0.019):datavg[1059] RPFILE=/usr/es/sbin/cluster/etc/vg/datavg.replay +epprd_rg:clvaryonvg(0.019):datavg[1060] permset=/usr/es/sbin/cluster/etc/vg/datavg.perms +epprd_rg:clvaryonvg(0.019):datavg[1061] failfile=/usr/es/sbin/cluster/etc/vg/datavg.fail +epprd_rg:clvaryonvg(0.019):datavg[1065] : Get some LVM information we are going to need in processing this +epprd_rg:clvaryonvg(0.019):datavg[1066] : volume group: +epprd_rg:clvaryonvg(0.019):datavg[1067] : - volume group identifier - vgid +epprd_rg:clvaryonvg(0.019):datavg[1068] : - list of disks +epprd_rg:clvaryonvg(0.019):datavg[1069] : - quorum indicator +epprd_rg:clvaryonvg(0.019):datavg[1070] : - timestamp if present +epprd_rg:clvaryonvg(0.019):datavg[1072] /usr/sbin/getlvodm -v datavg +epprd_rg:clvaryonvg(0.022):datavg[1072] VGID=00c44af100004b00000001851e9dc053 
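[Note] clvaryonvg starts by pulling what it needs from the LVM ODM: the VGID above, then (just below) the member PVs and the quorum setting. The same getlvodm calls can be issued by hand; the values shown are the ones from this log:

    # LVM ODM lookups clvaryonvg relies on (run as root)
    VGID=$(/usr/sbin/getlvodm -v datavg)      # 00c44af100004b00000001851e9dc053
    pvlst=$(/usr/sbin/getlvodm -w "$VGID" | cut -d' ' -f2)   # hdisk2 .. hdisk8
    quorum=$(/usr/sbin/getlvodm -Q datavg)    # y
    print "$VGID quorum=$quorum"
    print "$pvlst"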
+epprd_rg:clvaryonvg(0.023):datavg[1073] cut '-d ' -f2 +epprd_rg:clvaryonvg(0.023):datavg[1073] /usr/sbin/getlvodm -w 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(0.027):datavg[1073] pvlst=$'hdisk2\nhdisk3\nhdisk4\nhdisk5\nhdisk6\nhdisk7\nhdisk8' +epprd_rg:clvaryonvg(0.027):datavg[1074] /usr/sbin/getlvodm -Q datavg +epprd_rg:clvaryonvg(0.030):datavg[1074] quorum=y +epprd_rg:clvaryonvg(0.030):datavg[1075] TS_FROM_DISK='' +epprd_rg:clvaryonvg(0.030):datavg[1076] TS_FROM_ODM='' +epprd_rg:clvaryonvg(0.030):datavg[1077] GOOD_PV='' +epprd_rg:clvaryonvg(0.030):datavg[1078] O_flag='' +epprd_rg:clvaryonvg(0.030):datavg[1079] A_flag='' +epprd_rg:clvaryonvg(0.030):datavg[1080] mode_flag='' +epprd_rg:clvaryonvg(0.030):datavg[1081] vg_on_mode='' +epprd_rg:clvaryonvg(0.030):datavg[1082] vg_set_passive=FALSE +epprd_rg:clvaryonvg(0.030):datavg[1084] odmget -q 'attribute = varyon_state' PdAt +epprd_rg:clvaryonvg(0.033):datavg[1084] [[ -n $'\nPdAt:\n\tuniquetype = "logical_volume/vgsubclass/vgtype"\n\tattribute = "varyon_state"\n\tdeflt = "0"\n\tvalues = "0,1,2,3"\n\twidth = ""\n\ttype = "R"\n\tgeneric = ""\n\trep = "l"\n\tnls_index = 0' ]] +epprd_rg:clvaryonvg(0.033):datavg[1087] : LVM may record that a volume group was varied on from an earlier +epprd_rg:clvaryonvg(0.033):datavg[1088] : IPL. Rely on HA state tracking, and override the LVM check +epprd_rg:clvaryonvg(0.033):datavg[1090] O_flag=-O +epprd_rg:clvaryonvg(0.033):datavg[1093] : Checking if SCSI PR is enabled and it is so, +epprd_rg:clvaryonvg(0.033):datavg[1094] : confirming if the SCSI PR reservations are intact. +epprd_rg:clvaryonvg(0.034):datavg[1096] lssrc -ls clstrmgrES +epprd_rg:clvaryonvg(0.034):datavg[1096] 2>& 1 +epprd_rg:clvaryonvg(0.035):datavg[1096] egrep -q -v 'ST_INIT|NOT_CONFIGURED' +epprd_rg:clvaryonvg(0.035):datavg[1096] grep 'Current state:' +epprd_rg:clvaryonvg(0.050):datavg[1098] clodmget -n -q policy=scsi -f value HACMPsplitmerge +epprd_rg:clvaryonvg(0.053):datavg[1098] SCSIPR_ENABLED='' +epprd_rg:clvaryonvg(0.053):datavg[1098] typeset SCSIPR_ENABLED +epprd_rg:clvaryonvg(0.053):datavg[1099] clodmget -q $'name like \'*VOLUME_GROUP\' and value = datavg' -f group -n HACMPresource +epprd_rg:clvaryonvg(0.056):datavg[1099] resgrp=epprd_rg +epprd_rg:clvaryonvg(0.056):datavg[1099] typeset resgrp +epprd_rg:clvaryonvg(0.056):datavg[1100] [[ '' == Yes ]] +epprd_rg:clvaryonvg(0.056):datavg[1134] : Operations such as varying on the volume group are likely to +epprd_rg:clvaryonvg(0.056):datavg[1135] : require read/write access. So, set any volume group fencing appropriately. +epprd_rg:clvaryonvg(0.056):datavg[1137] cl_set_vg_fence_height -c datavg rw +epprd_rg:clvaryonvg(0.060):datavg[1138] RC=0 +epprd_rg:clvaryonvg(0.060):datavg[1139] (( 19 == 0 )) +epprd_rg:clvaryonvg(0.060):datavg[1147] : Return code from volume group fencing for datavg is 0 +epprd_rg:clvaryonvg(0.060):datavg[1148] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.060):datavg[1160] : Check on the current state of the volume group +epprd_rg:clvaryonvg(0.061):datavg[1182] grep -x -q datavg +epprd_rg:clvaryonvg(0.061):datavg[1182] lsvg -L +epprd_rg:clvaryonvg(0.065):datavg[1184] : The volume group is known - check to see if its already varyd on. 
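[Note] The state probe announced above comes down to three lsvg questions, asked just below: is the VG known locally, is it already varied on, and is it sitting in passive-only mode (the ECM state PowerHA leaves on standby nodes)? A direct rendering, ordered so the passive case, the one this log takes, is classified first:

    # classify the current varyon state of a volume group
    vg=datavg
    if ! lsvg -L | grep -x -q "$vg"; then
        print "$vg not known here, would need importvg"
    elif lsvg -L "$vg" 2>/dev/null | grep -q -i -w passive-only; then
        print "$vg varied on in passive mode"   # the case seen in this log
    elif lsvg -L -o | grep -x -q "$vg"; then
        print "$vg already fully varied on"
    fi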
+epprd_rg:clvaryonvg(0.066):datavg[1186] grep -x -q datavg +epprd_rg:clvaryonvg(0.066):datavg[1186] lsvg -L -o +epprd_rg:clvaryonvg(0.070):datavg[1190] lsvg -L datavg +epprd_rg:clvaryonvg(0.070):datavg[1190] 2> /dev/null +epprd_rg:clvaryonvg(0.070):datavg[1190] grep -q -i -w passive-only +epprd_rg:clvaryonvg(0.112):datavg[1191] vg_on_mode=passive +epprd_rg:clvaryonvg(0.114):datavg[1194] grep -iw removed +epprd_rg:clvaryonvg(0.114):datavg[1194] lsvg -p datavg +epprd_rg:clvaryonvg(0.114):datavg[1194] 2> /dev/null +epprd_rg:clvaryonvg(0.134):datavg[1194] removed_disks='' +epprd_rg:clvaryonvg(0.134):datavg[1195] [[ -n '' ]] +epprd_rg:clvaryonvg(0.134):datavg[1213] [[ -n passive ]] +epprd_rg:clvaryonvg(0.134):datavg[1215] lqueryvg -g 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(0.135):datavg[1215] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.153):datavg[1321] : +epprd_rg:clvaryonvg(0.153):datavg[1322] : First, sniff at the disk to see if the local ODM information +epprd_rg:clvaryonvg(0.153):datavg[1323] : matches what is on the disk. +epprd_rg:clvaryonvg(0.153):datavg[1324] : +epprd_rg:clvaryonvg(0.153):datavg[1326] vgdatimestamps +epprd_rg:clvaryonvg(0.153):datavg[vgdatimestamps:201] PS4_FUNC=vgdatimestamps +epprd_rg:clvaryonvg(0.153):datavg[vgdatimestamps:201] typeset PS4_FUNC +epprd_rg:clvaryonvg(0.153):datavg[vgdatimestamps:202] [[ high == high ]] +epprd_rg:clvaryonvg(0.153):datavg[vgdatimestamps:202] set -x +epprd_rg:clvaryonvg(0.153):datavg[vgdatimestamps:203] set -u +epprd_rg:clvaryonvg(0.153):datavg[vgdatimestamps:206] : See what timestamp LVM has recorded from the last time it checked +epprd_rg:clvaryonvg(0.153):datavg[vgdatimestamps:207] : the disks +epprd_rg:clvaryonvg(0.153):datavg[vgdatimestamps:209] /usr/sbin/getlvodm -T 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(0.154):datavg[vgdatimestamps:209] 2> /dev/null +epprd_rg:clvaryonvg(0.157):datavg[vgdatimestamps:209] TS_FROM_ODM=63d4e56032aa2e89 +epprd_rg:clvaryonvg(0.157):datavg[vgdatimestamps:212] : Check to see if HACMP is maintaining a timestamp for this volume group +epprd_rg:clvaryonvg(0.157):datavg[vgdatimestamps:213] : Needed for some older volume groups +epprd_rg:clvaryonvg(0.157):datavg[vgdatimestamps:215] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.tstamp ]] +epprd_rg:clvaryonvg(0.157):datavg[vgdatimestamps:234] : Get the time stamp from the actual disk +epprd_rg:clvaryonvg(0.157):datavg[vgdatimestamps:236] clvgdats /dev/datavg +epprd_rg:clvaryonvg(0.158):datavg[vgdatimestamps:236] 2> /dev/null +epprd_rg:clvaryonvg(0.167):datavg[vgdatimestamps:236] TS_FROM_DISK=63d4e56032aa2e89 +epprd_rg:clvaryonvg(0.168):datavg[vgdatimestamps:237] clvgdats_rc=0 +epprd_rg:clvaryonvg(0.168):datavg[vgdatimestamps:238] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.168):datavg[vgdatimestamps:247] [[ -z 63d4e56032aa2e89 ]] +epprd_rg:clvaryonvg(0.168):datavg[1328] [[ 63d4e56032aa2e89 != 63d4e56032aa2e89 ]] +epprd_rg:clvaryonvg(0.168):datavg[1344] : There is a chance that a VG that should be in passive mode is not. +epprd_rg:clvaryonvg(0.168):datavg[1345] : Run cl_pvo to put it in passive mode if possible. +epprd_rg:clvaryonvg(0.168):datavg[1350] [[ -z passive ]] +epprd_rg:clvaryonvg(0.168):datavg[1350] [[ passive == ordinary ]] +epprd_rg:clvaryonvg(0.168):datavg[1350] [[ passive == passive ]] +epprd_rg:clvaryonvg(0.168):datavg[1350] [[ -n '' ]] +epprd_rg:clvaryonvg(0.168):datavg[1381] : Let us assume that the old style synclvodm would sync all the PV/FS changes. 
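[Note] vgdatimestamps is the heart of the lazy-update scheme seen above: getlvodm -T returns the VGDA timestamp LVM last recorded in the ODM, clvgdats reads the timestamp actually on the disk, and only a mismatch forces the expensive resync path. Here both reads return 63d4e56032aa2e89, so the local ODM and the disk agree. Reduced to its essentials:

    # does the ODM's cached VGDA timestamp match the on-disk one?
    VGID=00c44af100004b00000001851e9dc053
    ts_odm=$(/usr/sbin/getlvodm -T "$VGID" 2>/dev/null)
    ts_disk=$(clvgdats /dev/datavg 2>/dev/null)   # PowerHA utility, per this log
    [[ $ts_disk != "$ts_odm" ]] && print "ODM stale, resync required"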
+epprd_rg:clvaryonvg(0.168):datavg[1383] expimpvg_notrequired=1 +epprd_rg:clvaryonvg(0.168):datavg[1386] : Optimistically give varyonvg a try. +epprd_rg:clvaryonvg(0.168):datavg[1388] [[ passive == passive ]] +epprd_rg:clvaryonvg(0.168):datavg[1391] : If the volume group was varyd on in passive mode when this node came +epprd_rg:clvaryonvg(0.168):datavg[1392] : up, flip it over to active mode. Following logic will then fall +epprd_rg:clvaryonvg(0.168):datavg[1393] : through to updatefs. +epprd_rg:clvaryonvg(0.168):datavg[1395] [[ passive == passive ]] +epprd_rg:clvaryonvg(0.168):datavg[1395] A_flag=-A +epprd_rg:clvaryonvg(0.168):datavg[1396] varyonvg -n -c -A -O datavg +epprd_rg:clvaryonvg(0.169):datavg[1396] 2>& 1 +epprd_rg:clvaryonvg(0.394):datavg[1396] varyonvg_output='' +epprd_rg:clvaryonvg(0.394):datavg[1397] varyonvg_rc=0 +epprd_rg:clvaryonvg(0.394):datavg[1397] typeset -li varyonvg_rc +epprd_rg:clvaryonvg(0.394):datavg[1399] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.394):datavg[1481] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.394):datavg[1576] : At this point, datavg should be varied on +epprd_rg:clvaryonvg(0.395):datavg[1578] [[ FALSE == TRUE ]] +epprd_rg:clvaryonvg(0.395):datavg[1585] [[ -z 63d4e56032aa2e89 ]] +epprd_rg:clvaryonvg(0.395):datavg[1592] vgdatimestamps +epprd_rg:clvaryonvg(0.395):datavg[vgdatimestamps:201] PS4_FUNC=vgdatimestamps +epprd_rg:clvaryonvg(0.395):datavg[vgdatimestamps:201] typeset PS4_FUNC +epprd_rg:clvaryonvg(0.395):datavg[vgdatimestamps:202] [[ high == high ]] +epprd_rg:clvaryonvg(0.395):datavg[vgdatimestamps:202] set -x +epprd_rg:clvaryonvg(0.395):datavg[vgdatimestamps:203] set -u +epprd_rg:clvaryonvg(0.395):datavg[vgdatimestamps:206] : See what timestamp LVM has recorded from the last time it checked +epprd_rg:clvaryonvg(0.395):datavg[vgdatimestamps:207] : the disks +epprd_rg:clvaryonvg(0.395):datavg[vgdatimestamps:209] /usr/sbin/getlvodm -T 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(0.396):datavg[vgdatimestamps:209] 2> /dev/null +epprd_rg:clvaryonvg(0.398):datavg[vgdatimestamps:209] TS_FROM_ODM=63d4e5ad2e52f576 +epprd_rg:clvaryonvg(0.398):datavg[vgdatimestamps:212] : Check to see if HACMP is maintaining a timestamp for this volume group +epprd_rg:clvaryonvg(0.398):datavg[vgdatimestamps:213] : Needed for some older volume groups +epprd_rg:clvaryonvg(0.398):datavg[vgdatimestamps:215] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.tstamp ]] +epprd_rg:clvaryonvg(0.398):datavg[vgdatimestamps:234] : Get the time stamp from the actual disk +epprd_rg:clvaryonvg(0.398):datavg[vgdatimestamps:236] clvgdats /dev/datavg +epprd_rg:clvaryonvg(0.399):datavg[vgdatimestamps:236] 2> /dev/null +epprd_rg:clvaryonvg(0.409):datavg[vgdatimestamps:236] TS_FROM_DISK=63d4e5ad2e52f576 +epprd_rg:clvaryonvg(0.409):datavg[vgdatimestamps:237] clvgdats_rc=0 +epprd_rg:clvaryonvg(0.409):datavg[vgdatimestamps:238] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.409):datavg[vgdatimestamps:247] [[ -z 63d4e5ad2e52f576 ]] +epprd_rg:clvaryonvg(0.409):datavg[1600] [[ 63d4e5ad2e52f576 != 63d4e5ad2e52f576 ]] +epprd_rg:clvaryonvg(0.409):datavg[1622] [[ FALSE == TRUE ]] +epprd_rg:clvaryonvg(0.409):datavg[1633] : Even if everything looks OK, update the local file system +epprd_rg:clvaryonvg(0.409):datavg[1634] : definitions, since changes there do not show up in the +epprd_rg:clvaryonvg(0.409):datavg[1635] : VGDA timestamps +epprd_rg:clvaryonvg(0.409):datavg[1637] updatefs datavg +epprd_rg:clvaryonvg(0.409):datavg[updatefs:506] PS4_FUNC=updatefs +epprd_rg:clvaryonvg(0.409):datavg[updatefs:506] typeset PS4_FUNC 
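
Because datavg was already varied on in passive mode when the node came up, the script does not perform a fresh varyon; it flips the group to active by adding -A (A_flag is set only in the passive case) alongside the -n, -c and -O options traced above. varyonvg returned 0, so control falls through to updatefs. A sketch of the flip as traced, with the output captured so a non-zero return code could be reported:

    # Sketch: flip a passively varied-on VG to active mode
    A_flag=''
    [[ "$vg_on_mode" == passive ]] && A_flag=-A
    varyonvg_output=$(varyonvg -n -c $A_flag -O datavg 2>&1)
    varyonvg_rc=$?
    (( varyonvg_rc != 0 )) && print -u2 -- "$varyonvg_output"   # hypothetical error path
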
+epprd_rg:clvaryonvg(0.409):datavg[updatefs:507] [[ high == high ]] +epprd_rg:clvaryonvg(0.409):datavg[updatefs:507] set -x +epprd_rg:clvaryonvg(0.409):datavg[updatefs:508] do_imfs='' +epprd_rg:clvaryonvg(0.409):datavg[updatefs:508] typeset do_imfs +epprd_rg:clvaryonvg(0.409):datavg[updatefs:509] has_typed_lvs='' +epprd_rg:clvaryonvg(0.409):datavg[updatefs:509] typeset has_typed_lvs +epprd_rg:clvaryonvg(0.409):datavg[updatefs:512] : Delete existing filesystem information for this volume group. This is +epprd_rg:clvaryonvg(0.409):datavg[updatefs:513] : needed because imfs will not update an existing /etc/filesystems entry. +epprd_rg:clvaryonvg(0.411):datavg[updatefs:515] cut -f1 '-d ' +epprd_rg:clvaryonvg(0.411):datavg[updatefs:515] /usr/sbin/getlvodm -L datavg +epprd_rg:clvaryonvg(0.415):datavg[updatefs:515] lv_list=$'saplv\nsapmntlv\noraclelv\nepplv\noraarchlv\nsapdata1lv\nsapdata2lv\nsapdata3lv\nsapdata4lv\nboardlv\noriglogAlv\noriglogBlv\nmirrlogAlv\nmirrlogBlv\nepprdaloglv' +epprd_rg:clvaryonvg(0.415):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.415):datavg[updatefs:521] clodmget -q 'name = saplv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.418):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.418):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.418):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.418):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(0.418):datavg[updatefs:530] /usr/sbin/getlvcb -f saplv +epprd_rg:clvaryonvg(0.419):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.438):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.438):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.438):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.439):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.439):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.443):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.443):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.443):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.443):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.444):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.463):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.463):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.463):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.463):datavg[updatefs:538] : 3. Its logs LVCB is readable
+epprd_rg:clvaryonvg(0.464):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.464):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.467):datavg[updatefs:545] /usr/sbin/imfs -lx saplv +epprd_rg:clvaryonvg(0.472):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.472):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.472):datavg[updatefs:521] clodmget -q 'name = sapmntlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.475):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.475):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.475):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.475):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(0.475):datavg[updatefs:530] /usr/sbin/getlvcb -f sapmntlv +epprd_rg:clvaryonvg(0.476):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.494):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.494):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.495):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.496):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.496):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.500):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.500):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.500):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.500):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.501):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.520):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.520):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.520):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.520):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.521):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.521):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.524):datavg[updatefs:545] /usr/sbin/imfs -lx sapmntlv +epprd_rg:clvaryonvg(0.529):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.529):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.529):datavg[updatefs:521] clodmget -q 'name = oraclelv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.532):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.532):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.532):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.532):datavg[updatefs:528] : information to reconstruct it.
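
Each iteration of the loop above guards the deletion it is about to do: skip raw logical volumes (the CuAt type query), read the LVCB with getlvcb -f, pull the JFS2 log device out of the vfs string with sed, and confirm that the log device's own LVCB is readable before imfs -lx removes the stanza. A minimal sketch of one iteration's guard, written as the body of the per-LV loop (saplv is the example from the trace):

    # Sketch: per-LV guard before deleting a /etc/filesystems stanza
    LV=saplv
    [[ -n $(clodmget -q "name = $LV and attribute = type and value = raw" -f value -n CuAt) ]] && continue
    fs_info=$(LC_ALL=C /usr/sbin/getlvcb -f $LV)     # e.g. vfs=jfs2:log=/dev/epprdaloglv:...
    [[ "$fs_info" == *([[:space:]]) ]] && continue   # no vfs data: not a file system
    log_lv=$(echo "$fs_info" | sed -n 's/.*log=\([^:]*\).*/\1/p')
    if [[ -n "$log_lv" && "$log_lv" != INLINE ]]; then
        /usr/sbin/getlvcb -t ${log_lv#/dev/} >/dev/null 2>&1 || continue   # log LVCB unreadable
    fi
    /usr/sbin/imfs -lx $LV    # safe to drop: the stanza can be rebuilt from the LVCB
    do_imfs=true
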
+epprd_rg:clvaryonvg(0.532):datavg[updatefs:530] /usr/sbin/getlvcb -f oraclelv +epprd_rg:clvaryonvg(0.533):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.552):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.552):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.552):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.553):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.553):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.557):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.557):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.557):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.557):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.558):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.577):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.577):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.577):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.577):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.578):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.578):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.582):datavg[updatefs:545] /usr/sbin/imfs -lx oraclelv +epprd_rg:clvaryonvg(0.586):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.586):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.586):datavg[updatefs:521] clodmget -q 'name = epplv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.589):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.589):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.589):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.589):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.589):datavg[updatefs:530] /usr/sbin/getlvcb -f epplv +epprd_rg:clvaryonvg(0.590):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.608):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.608):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.608):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.610):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.610):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.614):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.614):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.614):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.614):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.615):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.634):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.634):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.634):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.634):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.635):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.635):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.638):datavg[updatefs:545] /usr/sbin/imfs -lx epplv +epprd_rg:clvaryonvg(0.643):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.643):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.643):datavg[updatefs:521] clodmget -q 'name = oraarchlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.646):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.646):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.646):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.646):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.646):datavg[updatefs:530] /usr/sbin/getlvcb -f oraarchlv +epprd_rg:clvaryonvg(0.647):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.665):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.665):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.665):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.667):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.667):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.671):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.671):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.671):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.671):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.672):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.691):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.691):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.691):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.691):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.692):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.692):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.695):datavg[updatefs:545] /usr/sbin/imfs -lx oraarchlv +epprd_rg:clvaryonvg(0.700):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.700):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.700):datavg[updatefs:521] clodmget -q 'name = sapdata1lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.703):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.703):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.703):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.703):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.703):datavg[updatefs:530] /usr/sbin/getlvcb -f sapdata1lv +epprd_rg:clvaryonvg(0.704):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.722):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.723):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.723):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.724):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.724):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.728):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.728):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.728):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.728):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.729):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.749):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.749):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.749):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.749):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.750):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.750):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.753):datavg[updatefs:545] /usr/sbin/imfs -lx sapdata1lv +epprd_rg:clvaryonvg(0.757):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.757):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.757):datavg[updatefs:521] clodmget -q 'name = sapdata2lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.760):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.761):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.761):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.761):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.761):datavg[updatefs:530] /usr/sbin/getlvcb -f sapdata2lv +epprd_rg:clvaryonvg(0.762):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.780):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.780):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.780):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.782):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.782):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.785):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.785):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.785):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.785):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.787):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.806):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.806):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.806):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.806):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.807):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.807):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.811):datavg[updatefs:545] /usr/sbin/imfs -lx sapdata2lv +epprd_rg:clvaryonvg(0.815):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.815):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.815):datavg[updatefs:521] clodmget -q 'name = sapdata3lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.818):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.818):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.818):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.818):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.818):datavg[updatefs:530] /usr/sbin/getlvcb -f sapdata3lv +epprd_rg:clvaryonvg(0.819):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.837):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.837):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.837):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.839):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.839):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.842):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.842):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.842):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.842):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.844):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.862):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.862):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.862):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.862):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.863):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.863):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.866):datavg[updatefs:545] /usr/sbin/imfs -lx sapdata3lv +epprd_rg:clvaryonvg(0.871):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.871):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.871):datavg[updatefs:521] clodmget -q 'name = sapdata4lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.874):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.874):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.874):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.874):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.874):datavg[updatefs:530] /usr/sbin/getlvcb -f sapdata4lv +epprd_rg:clvaryonvg(0.875):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.893):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.893):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.893):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.894):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.895):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.898):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.898):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.898):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.898):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.899):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.918):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.918):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.918):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.918):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.919):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.919):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.922):datavg[updatefs:545] /usr/sbin/imfs -lx sapdata4lv +epprd_rg:clvaryonvg(0.926):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.926):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.926):datavg[updatefs:521] clodmget -q 'name = boardlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.929):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.930):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.930):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.930):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.930):datavg[updatefs:530] /usr/sbin/getlvcb -f boardlv +epprd_rg:clvaryonvg(0.931):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.949):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.949):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.949):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.951):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.951):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.954):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.954):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.954):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.954):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.955):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.974):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.974):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.974):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.974):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.975):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.975):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.978):datavg[updatefs:545] /usr/sbin/imfs -lx boardlv +epprd_rg:clvaryonvg(0.982):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.982):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.982):datavg[updatefs:521] clodmget -q 'name = origlogAlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.986):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.986):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.986):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.986):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.986):datavg[updatefs:530] /usr/sbin/getlvcb -f origlogAlv +epprd_rg:clvaryonvg(0.987):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.004):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.004):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.004):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.006):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.006):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.009):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.009):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.009):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.009):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.011):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.029):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.029):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.029):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.029):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.030):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.030):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.033):datavg[updatefs:545] /usr/sbin/imfs -lx origlogAlv +epprd_rg:clvaryonvg(1.038):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.038):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.038):datavg[updatefs:521] clodmget -q 'name = origlogBlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.041):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.041):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.041):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.041):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(1.041):datavg[updatefs:530] /usr/sbin/getlvcb -f origlogBlv +epprd_rg:clvaryonvg(1.042):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.060):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.060):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.060):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.062):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.062):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.066):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.066):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.066):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.066):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.067):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.086):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.086):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.086):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.086):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.087):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.087):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.090):datavg[updatefs:545] /usr/sbin/imfs -lx origlogBlv +epprd_rg:clvaryonvg(1.094):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.094):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.094):datavg[updatefs:521] clodmget -q 'name = mirrlogAlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.098):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.098):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.098):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.098):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(1.098):datavg[updatefs:530] /usr/sbin/getlvcb -f mirrlogAlv +epprd_rg:clvaryonvg(1.099):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.116):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.116):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.116):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.118):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.118):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.121):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.121):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.121):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.121):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.123):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.142):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.142):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.142):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.142):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.143):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.143):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.146):datavg[updatefs:545] /usr/sbin/imfs -lx mirrlogAlv +epprd_rg:clvaryonvg(1.150):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.150):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.150):datavg[updatefs:521] clodmget -q 'name = mirrlogBlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.154):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.154):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.154):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.154):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(1.154):datavg[updatefs:530] /usr/sbin/getlvcb -f mirrlogBlv +epprd_rg:clvaryonvg(1.155):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.172):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.172):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.172):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.174):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.174):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.177):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.177):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.177):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.177):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.179):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.197):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.197):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.197):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.197):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.198):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.198):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.201):datavg[updatefs:545] /usr/sbin/imfs -lx mirrlogBlv +epprd_rg:clvaryonvg(1.205):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.205):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.205):datavg[updatefs:521] clodmget -q 'name = epprdaloglv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.209):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.209):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.209):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.209):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(1.209):datavg[updatefs:530] /usr/sbin/getlvcb -f epprdaloglv +epprd_rg:clvaryonvg(1.210):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.227):datavg[updatefs:530] fs_info=' ' +epprd_rg:clvaryonvg(1.227):datavg[updatefs:531] [[ -n ' ' ]] +epprd_rg:clvaryonvg(1.227):datavg[updatefs:531] [[ ' ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.227):datavg[updatefs:552] [[ -n true ]] +epprd_rg:clvaryonvg(1.227):datavg[updatefs:556] : Pick up any file system changes that may have happened when +epprd_rg:clvaryonvg(1.227):datavg[updatefs:557] : the volume group was owned by another node. That is, if a +epprd_rg:clvaryonvg(1.227):datavg[updatefs:558] : local change was made - not through C-SPOC, we whould have no +epprd_rg:clvaryonvg(1.227):datavg[updatefs:559] : indication it happened. 
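
By this point every rebuildable stanza has been removed with imfs -lx; epprdaloglv itself, the JFS2 log volume, returned an effectively empty fs_info and was skipped. Since do_imfs is set, one imfs run against the whole volume group now re-imports /etc/filesystems from the LVCBs on disk, which also picks up any change made outside C-SPOC while another node owned the group. A condensed sketch of the full delete-then-reimport pass (the raw-LV and log-LVCB guards from the sketch above are elided):

    # Sketch: refresh /etc/filesystems for every file system LV in a VG
    VG=datavg
    for lv in $(/usr/sbin/getlvodm -L $VG | cut -f1 -d' '); do
        fs_info=$(LC_ALL=C /usr/sbin/getlvcb -f $lv)
        [[ "$fs_info" == *([[:space:]]) ]] && continue   # e.g. the log LV epprdaloglv
        /usr/sbin/imfs -lx $lv                           # drop the stale stanza
    done
    /usr/sbin/imfs $VG   # re-import all stanzas from the on-disk LVCBs
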
+epprd_rg:clvaryonvg(1.227):datavg[updatefs:561] [[ -z '' ]] +epprd_rg:clvaryonvg(1.227):datavg[updatefs:563] /usr/sbin/imfs datavg +epprd_rg:clvaryonvg(1.910):datavg[updatefs:589] : For a valid file system configuration, the mount point in +epprd_rg:clvaryonvg(1.911):datavg[updatefs:590] : /etc/filesystems for the logical volume should match the +epprd_rg:clvaryonvg(1.911):datavg[updatefs:591] : label of the logical volume. The above imfs should have +epprd_rg:clvaryonvg(1.911):datavg[updatefs:592] : matched those two. Now, check that they match the label +epprd_rg:clvaryonvg(1.911):datavg[updatefs:593] : for the logical volume as saved in ODM. +epprd_rg:clvaryonvg(1.911):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.911):datavg[updatefs:600] clodmget -q 'name = saplv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.914):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(1.914):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(1.914):datavg[updatefs:607] /usr/sbin/getlvcb -f saplv +epprd_rg:clvaryonvg(1.931):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.931):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(1.931):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(1.931):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(1.931):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(1.931):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.931):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.931):datavg[updatefs:623] : Label and file system type from LVCB on disk for saplv +epprd_rg:clvaryonvg(1.932):datavg[updatefs:625] getlvcb -T -A saplv +epprd_rg:clvaryonvg(1.933):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(1.936):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(1.939):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(1.940):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(1.953):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(1.953):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(1.953):datavg[updatefs:632] : Mount point in /etc/filesystems for saplv +epprd_rg:clvaryonvg(1.956):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(1.955):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/saplv$' /etc/filesystems +epprd_rg:clvaryonvg(1.959):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(1.962):datavg[updatefs:634] fs_mount_point=/usr/sap +epprd_rg:clvaryonvg(1.962):datavg[updatefs:637] : CuAt label attribute for saplv +epprd_rg:clvaryonvg(1.962):datavg[updatefs:639] clodmget -q 'name = saplv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(1.966):datavg[updatefs:639] CuAt_label=/usr/sap +epprd_rg:clvaryonvg(1.967):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(1.968):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(1.971):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(1.971):datavg[updatefs:657] [[ -z /usr/sap ]] +epprd_rg:clvaryonvg(1.971):datavg[updatefs:657] [[ /usr/sap == None ]] +epprd_rg:clvaryonvg(1.971):datavg[updatefs:665] [[ /usr/sap == /usr/sap ]] +epprd_rg:clvaryonvg(1.971):datavg[updatefs:665] [[ /usr/sap != /usr/sap ]] 
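
The verification pass that follows the re-import checks three records against each other for every file system: the label stored in the LVCB on disk (getlvcb -T -A), the stanza name in /etc/filesystems (located with AIX egrep -p, which prints the whole paragraph containing the dev = /dev/<lv> line; the stanza name is the mount point), and the label attribute in the CuAt ODM class. For saplv all three are /usr/sap, so the inequality tests above fall through with nothing to fix. A sketch of the three-way lookup; the awk extraction is an assumption about the 'label = <value>' layout, where the trace instead reads pasted egrep output:

    # Sketch: gather the three mount-point records compared for one LV
    LV=saplv
    lvcb_label=$(LC_ALL=C getlvcb -T -A $LV | egrep -w 'label =' | awk '{print $3}')
    fs_mount_point=$(egrep -p "^([[:space:]])*dev([[:space:]])*= /dev/$LV\$" /etc/filesystems |
        head -1 | cut -f1 -d:)
    CuAt_label=$(clodmget -q "name = $LV and attribute = label" -f value -n CuAt)
    [[ "$lvcb_label" == "$fs_mount_point" && "$fs_mount_point" == "$CuAt_label" ]] ||
        : # hypothetical: a mismatch here is what would drive a corrective update
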
+epprd_rg:clvaryonvg(1.971):datavg[updatefs:685] [[ /usr/sap != /usr/sap ]] +epprd_rg:clvaryonvg(1.971):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.971):datavg[updatefs:600] clodmget -q 'name = sapmntlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.975):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(1.975):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(1.975):datavg[updatefs:607] /usr/sbin/getlvcb -f sapmntlv +epprd_rg:clvaryonvg(1.992):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.992):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(1.992):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(1.992):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(1.992):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(1.992):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.992):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.992):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapmntlv +epprd_rg:clvaryonvg(1.993):datavg[updatefs:625] getlvcb -T -A sapmntlv +epprd_rg:clvaryonvg(1.994):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(1.997):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.000):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.002):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.014):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.014):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.014):datavg[updatefs:632] : Mount point in /etc/filesystems for sapmntlv +epprd_rg:clvaryonvg(2.016):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapmntlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.018):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.020):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.023):datavg[updatefs:634] fs_mount_point=/sapmnt +epprd_rg:clvaryonvg(2.023):datavg[updatefs:637] : CuAt label attribute for sapmntlv +epprd_rg:clvaryonvg(2.023):datavg[updatefs:639] clodmget -q 'name = sapmntlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.027):datavg[updatefs:639] CuAt_label=/sapmnt +epprd_rg:clvaryonvg(2.029):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.030):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.034):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.034):datavg[updatefs:657] [[ -z /sapmnt ]] +epprd_rg:clvaryonvg(2.034):datavg[updatefs:657] [[ /sapmnt == None ]] +epprd_rg:clvaryonvg(2.034):datavg[updatefs:665] [[ /sapmnt == /sapmnt ]] +epprd_rg:clvaryonvg(2.034):datavg[updatefs:665] [[ /sapmnt != /sapmnt ]] +epprd_rg:clvaryonvg(2.034):datavg[updatefs:685] [[ /sapmnt != /sapmnt ]] +epprd_rg:clvaryonvg(2.034):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.034):datavg[updatefs:600] clodmget -q 'name = oraclelv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.038):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.038):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.038):datavg[updatefs:607] /usr/sbin/getlvcb -f oraclelv 
+epprd_rg:clvaryonvg(2.056):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.056):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.056):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.056):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.056):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.056):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.056):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.056):datavg[updatefs:623] : Label and file system type from LVCB on disk for oraclelv +epprd_rg:clvaryonvg(2.057):datavg[updatefs:625] getlvcb -T -A oraclelv +epprd_rg:clvaryonvg(2.057):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.060):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.063):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.065):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.078):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.078):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.078):datavg[updatefs:632] : Mount point in /etc/filesystems for oraclelv +epprd_rg:clvaryonvg(2.080):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/oraclelv$' /etc/filesystems +epprd_rg:clvaryonvg(2.082):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.084):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.087):datavg[updatefs:634] fs_mount_point=/oracle +epprd_rg:clvaryonvg(2.087):datavg[updatefs:637] : CuAt label attribute for oraclelv +epprd_rg:clvaryonvg(2.087):datavg[updatefs:639] clodmget -q 'name = oraclelv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.090):datavg[updatefs:639] CuAt_label=/oracle +epprd_rg:clvaryonvg(2.092):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.093):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.096):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.096):datavg[updatefs:657] [[ -z /oracle ]] +epprd_rg:clvaryonvg(2.096):datavg[updatefs:657] [[ /oracle == None ]] +epprd_rg:clvaryonvg(2.096):datavg[updatefs:665] [[ /oracle == /oracle ]] +epprd_rg:clvaryonvg(2.096):datavg[updatefs:665] [[ /oracle != /oracle ]] +epprd_rg:clvaryonvg(2.096):datavg[updatefs:685] [[ /oracle != /oracle ]] +epprd_rg:clvaryonvg(2.096):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.096):datavg[updatefs:600] clodmget -q 'name = epplv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.100):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.100):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.100):datavg[updatefs:607] /usr/sbin/getlvcb -f epplv +epprd_rg:clvaryonvg(2.117):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.117):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.117):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.117):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.117):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.117):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.117):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]]
+epprd_rg:clvaryonvg(2.117):datavg[updatefs:623] : Label and file system type from LVCB on disk for epplv +epprd_rg:clvaryonvg(2.118):datavg[updatefs:625] getlvcb -T -A epplv +epprd_rg:clvaryonvg(2.118):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.121):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.124):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.126):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.139):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.139):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.139):datavg[updatefs:632] : Mount point in /etc/filesystems for epplv +epprd_rg:clvaryonvg(2.140):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/epplv$' /etc/filesystems +epprd_rg:clvaryonvg(2.143):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.144):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.147):datavg[updatefs:634] fs_mount_point=/oracle/EPP +epprd_rg:clvaryonvg(2.148):datavg[updatefs:637] : CuAt label attribute for epplv +epprd_rg:clvaryonvg(2.148):datavg[updatefs:639] clodmget -q 'name = epplv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.151):datavg[updatefs:639] CuAt_label=/oracle/EPP +epprd_rg:clvaryonvg(2.152):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.153):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.157):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.157):datavg[updatefs:657] [[ -z /oracle/EPP ]] +epprd_rg:clvaryonvg(2.157):datavg[updatefs:657] [[ /oracle/EPP == None ]] +epprd_rg:clvaryonvg(2.157):datavg[updatefs:665] [[ /oracle/EPP == /oracle/EPP ]] +epprd_rg:clvaryonvg(2.157):datavg[updatefs:665] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:clvaryonvg(2.157):datavg[updatefs:685] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:clvaryonvg(2.157):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.157):datavg[updatefs:600] clodmget -q 'name = oraarchlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.160):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.160):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.160):datavg[updatefs:607] /usr/sbin/getlvcb -f oraarchlv +epprd_rg:clvaryonvg(2.178):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.178):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.178):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.178):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.178):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.178):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.178):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.178):datavg[updatefs:623] : Label and file system type from LVCB on disk for oraarchlv +epprd_rg:clvaryonvg(2.179):datavg[updatefs:625] getlvcb -T -A oraarchlv +epprd_rg:clvaryonvg(2.179):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.182):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.185):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.187):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest
+epprd_rg:clvaryonvg(2.200):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.200):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.200):datavg[updatefs:632] : Mount point in /etc/filesystems for oraarchlv +epprd_rg:clvaryonvg(2.201):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/oraarchlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.204):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.205):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.208):datavg[updatefs:634] fs_mount_point=/oracle/EPP/oraarch +epprd_rg:clvaryonvg(2.208):datavg[updatefs:637] : CuAt label attribute for oraarchlv +epprd_rg:clvaryonvg(2.208):datavg[updatefs:639] clodmget -q 'name = oraarchlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.212):datavg[updatefs:639] CuAt_label=/oracle/EPP/oraarch +epprd_rg:clvaryonvg(2.213):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.214):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.217):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.218):datavg[updatefs:657] [[ -z /oracle/EPP/oraarch ]] +epprd_rg:clvaryonvg(2.218):datavg[updatefs:657] [[ /oracle/EPP/oraarch == None ]] +epprd_rg:clvaryonvg(2.218):datavg[updatefs:665] [[ /oracle/EPP/oraarch == /oracle/EPP/oraarch ]] +epprd_rg:clvaryonvg(2.218):datavg[updatefs:665] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:clvaryonvg(2.218):datavg[updatefs:685] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:clvaryonvg(2.218):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.218):datavg[updatefs:600] clodmget -q 'name = sapdata1lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.221):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.221):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.221):datavg[updatefs:607] /usr/sbin/getlvcb -f sapdata1lv +epprd_rg:clvaryonvg(2.238):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.238):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.238):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.238):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.238):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.238):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.238):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.238):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapdata1lv +epprd_rg:clvaryonvg(2.239):datavg[updatefs:625] getlvcb -T -A sapdata1lv +epprd_rg:clvaryonvg(2.240):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.243):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.246):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.248):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.261):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.261):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.261):datavg[updatefs:632] : Mount point in /etc/filesystems for sapdata1lv +epprd_rg:clvaryonvg(2.262):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapdata1lv$' /etc/filesystems +epprd_rg:clvaryonvg(2.265):datavg[updatefs:634] head -1
+epprd_rg:clvaryonvg(2.265):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.270):datavg[updatefs:634] fs_mount_point=/oracle/EPP/sapdata1 +epprd_rg:clvaryonvg(2.270):datavg[updatefs:637] : CuAt label attribute for sapdata1lv +epprd_rg:clvaryonvg(2.270):datavg[updatefs:639] clodmget -q 'name = sapdata1lv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.273):datavg[updatefs:639] CuAt_label=/oracle/EPP/sapdata1 +epprd_rg:clvaryonvg(2.275):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.276):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.279):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.279):datavg[updatefs:657] [[ -z /oracle/EPP/sapdata1 ]] +epprd_rg:clvaryonvg(2.279):datavg[updatefs:657] [[ /oracle/EPP/sapdata1 == None ]] +epprd_rg:clvaryonvg(2.279):datavg[updatefs:665] [[ /oracle/EPP/sapdata1 == /oracle/EPP/sapdata1 ]] +epprd_rg:clvaryonvg(2.279):datavg[updatefs:665] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:clvaryonvg(2.279):datavg[updatefs:685] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:clvaryonvg(2.279):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.279):datavg[updatefs:600] clodmget -q 'name = sapdata2lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.282):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.282):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.282):datavg[updatefs:607] /usr/sbin/getlvcb -f sapdata2lv +epprd_rg:clvaryonvg(2.300):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.300):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.300):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.300):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.300):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.300):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.300):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.300):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapdata2lv +epprd_rg:clvaryonvg(2.301):datavg[updatefs:625] getlvcb -T -A sapdata2lv +epprd_rg:clvaryonvg(2.301):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.304):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.307):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.309):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.321):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.321):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.321):datavg[updatefs:632] : Mount point in /etc/filesystems for sapdata2lv +epprd_rg:clvaryonvg(2.323):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapdata2lv$' /etc/filesystems +epprd_rg:clvaryonvg(2.325):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.327):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.330):datavg[updatefs:634] fs_mount_point=/oracle/EPP/sapdata2 +epprd_rg:clvaryonvg(2.330):datavg[updatefs:637] : CuAt label attribute for sapdata2lv +epprd_rg:clvaryonvg(2.330):datavg[updatefs:639] clodmget -q 'name = sapdata2lv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.334):datavg[updatefs:639] CuAt_label=/oracle/EPP/sapdata2 
+epprd_rg:clvaryonvg(2.336):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.337):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.340):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.340):datavg[updatefs:657] [[ -z /oracle/EPP/sapdata2 ]] +epprd_rg:clvaryonvg(2.340):datavg[updatefs:657] [[ /oracle/EPP/sapdata2 == None ]] +epprd_rg:clvaryonvg(2.340):datavg[updatefs:665] [[ /oracle/EPP/sapdata2 == /oracle/EPP/sapdata2 ]] +epprd_rg:clvaryonvg(2.340):datavg[updatefs:665] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:clvaryonvg(2.340):datavg[updatefs:685] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:clvaryonvg(2.340):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.340):datavg[updatefs:600] clodmget -q 'name = sapdata3lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.343):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.343):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.343):datavg[updatefs:607] /usr/sbin/getlvcb -f sapdata3lv +epprd_rg:clvaryonvg(2.360):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.360):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.360):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.361):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.361):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.361):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.361):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.361):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapdata3lv +epprd_rg:clvaryonvg(2.362):datavg[updatefs:625] getlvcb -T -A sapdata3lv +epprd_rg:clvaryonvg(2.362):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.365):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.368):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.370):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.382):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.382):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.382):datavg[updatefs:632] : Mount point in /etc/filesystems for sapdata3lv +epprd_rg:clvaryonvg(2.384):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapdata3lv$' /etc/filesystems +epprd_rg:clvaryonvg(2.386):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.388):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.391):datavg[updatefs:634] fs_mount_point=/oracle/EPP/sapdata3 +epprd_rg:clvaryonvg(2.391):datavg[updatefs:637] : CuAt label attribute for sapdata3lv +epprd_rg:clvaryonvg(2.391):datavg[updatefs:639] clodmget -q 'name = sapdata3lv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.395):datavg[updatefs:639] CuAt_label=/oracle/EPP/sapdata3 +epprd_rg:clvaryonvg(2.396):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.397):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.400):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.400):datavg[updatefs:657] [[ -z /oracle/EPP/sapdata3 ]] +epprd_rg:clvaryonvg(2.400):datavg[updatefs:657] [[ /oracle/EPP/sapdata3 == None ]] +epprd_rg:clvaryonvg(2.400):datavg[updatefs:665] [[ /oracle/EPP/sapdata3 == 
/oracle/EPP/sapdata3 ]] +epprd_rg:clvaryonvg(2.400):datavg[updatefs:665] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:clvaryonvg(2.400):datavg[updatefs:685] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:clvaryonvg(2.401):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.401):datavg[updatefs:600] clodmget -q 'name = sapdata4lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.404):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.404):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.404):datavg[updatefs:607] /usr/sbin/getlvcb -f sapdata4lv +epprd_rg:clvaryonvg(2.422):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.422):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.422):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.422):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.422):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.422):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.422):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.422):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapdata4lv +epprd_rg:clvaryonvg(2.423):datavg[updatefs:625] getlvcb -T -A sapdata4lv +epprd_rg:clvaryonvg(2.424):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.427):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.430):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.432):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.447):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.447):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.447):datavg[updatefs:632] : Mount point in /etc/filesystems for sapdata4lv +epprd_rg:clvaryonvg(2.449):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapdata4lv$' /etc/filesystems +epprd_rg:clvaryonvg(2.451):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.453):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.456):datavg[updatefs:634] fs_mount_point=/oracle/EPP/sapdata4 +epprd_rg:clvaryonvg(2.456):datavg[updatefs:637] : CuAt label attribute for sapdata4lv +epprd_rg:clvaryonvg(2.456):datavg[updatefs:639] clodmget -q 'name = sapdata4lv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.459):datavg[updatefs:639] CuAt_label=/oracle/EPP/sapdata4 +epprd_rg:clvaryonvg(2.461):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.462):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.465):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.465):datavg[updatefs:657] [[ -z /oracle/EPP/sapdata4 ]] +epprd_rg:clvaryonvg(2.465):datavg[updatefs:657] [[ /oracle/EPP/sapdata4 == None ]] +epprd_rg:clvaryonvg(2.465):datavg[updatefs:665] [[ /oracle/EPP/sapdata4 == /oracle/EPP/sapdata4 ]] +epprd_rg:clvaryonvg(2.465):datavg[updatefs:665] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:clvaryonvg(2.465):datavg[updatefs:685] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:clvaryonvg(2.465):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.465):datavg[updatefs:600] clodmget -q 'name = boardlv and attribute = type and value = raw' 
-f value -n CuAt +epprd_rg:clvaryonvg(2.468):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.468):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.468):datavg[updatefs:607] /usr/sbin/getlvcb -f boardlv +epprd_rg:clvaryonvg(2.486):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.486):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.486):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.486):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.486):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.486):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.486):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.486):datavg[updatefs:623] : Label and file system type from LVCB on disk for boardlv +epprd_rg:clvaryonvg(2.487):datavg[updatefs:625] getlvcb -T -A boardlv +epprd_rg:clvaryonvg(2.487):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.490):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.493):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.495):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.508):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.508):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.508):datavg[updatefs:632] : Mount point in /etc/filesystems for boardlv +epprd_rg:clvaryonvg(2.510):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/boardlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.512):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.513):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.517):datavg[updatefs:634] fs_mount_point=/board_org +epprd_rg:clvaryonvg(2.517):datavg[updatefs:637] : CuAt label attribute for boardlv +epprd_rg:clvaryonvg(2.517):datavg[updatefs:639] clodmget -q 'name = boardlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.520):datavg[updatefs:639] CuAt_label=/board_org +epprd_rg:clvaryonvg(2.522):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.523):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.526):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.526):datavg[updatefs:657] [[ -z /board_org ]] +epprd_rg:clvaryonvg(2.526):datavg[updatefs:657] [[ /board_org == None ]] +epprd_rg:clvaryonvg(2.526):datavg[updatefs:665] [[ /board_org == /board_org ]] +epprd_rg:clvaryonvg(2.526):datavg[updatefs:665] [[ /board_org != /board_org ]] +epprd_rg:clvaryonvg(2.526):datavg[updatefs:685] [[ /board_org != /board_org ]] +epprd_rg:clvaryonvg(2.526):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.526):datavg[updatefs:600] clodmget -q 'name = origlogAlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.529):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.529):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.529):datavg[updatefs:607] /usr/sbin/getlvcb -f origlogAlv +epprd_rg:clvaryonvg(2.547):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.547):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.547):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.547):datavg[updatefs:609] (( 0 != 0 
)) +epprd_rg:clvaryonvg(2.547):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.547):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.547):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.547):datavg[updatefs:623] : Label and file system type from LVCB on disk for origlogAlv +epprd_rg:clvaryonvg(2.548):datavg[updatefs:625] getlvcb -T -A origlogAlv +epprd_rg:clvaryonvg(2.548):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.552):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.555):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.557):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.569):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.569):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.569):datavg[updatefs:632] : Mount point in /etc/filesystems for origlogAlv +epprd_rg:clvaryonvg(2.571):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/origlogAlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.573):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.574):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.578):datavg[updatefs:634] fs_mount_point=/oracle/EPP/origlogA +epprd_rg:clvaryonvg(2.578):datavg[updatefs:637] : CuAt label attribute for origlogAlv +epprd_rg:clvaryonvg(2.578):datavg[updatefs:639] clodmget -q 'name = origlogAlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.581):datavg[updatefs:639] CuAt_label=/oracle/EPP/origlogA +epprd_rg:clvaryonvg(2.583):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.584):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.587):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.587):datavg[updatefs:657] [[ -z /oracle/EPP/origlogA ]] +epprd_rg:clvaryonvg(2.587):datavg[updatefs:657] [[ /oracle/EPP/origlogA == None ]] +epprd_rg:clvaryonvg(2.587):datavg[updatefs:665] [[ /oracle/EPP/origlogA == /oracle/EPP/origlogA ]] +epprd_rg:clvaryonvg(2.587):datavg[updatefs:665] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:clvaryonvg(2.587):datavg[updatefs:685] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:clvaryonvg(2.587):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.587):datavg[updatefs:600] clodmget -q 'name = origlogBlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.591):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.591):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.591):datavg[updatefs:607] /usr/sbin/getlvcb -f origlogBlv +epprd_rg:clvaryonvg(2.608):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.608):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.608):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.608):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.608):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.608):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.608):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.608):datavg[updatefs:623] : Label and file system 
type from LVCB on disk for origlogBlv +epprd_rg:clvaryonvg(2.609):datavg[updatefs:625] getlvcb -T -A origlogBlv +epprd_rg:clvaryonvg(2.609):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.613):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.616):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.617):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.630):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.630):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.630):datavg[updatefs:632] : Mount point in /etc/filesystems for origlogBlv +epprd_rg:clvaryonvg(2.632):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/origlogBlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.634):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.635):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.639):datavg[updatefs:634] fs_mount_point=/oracle/EPP/origlogB +epprd_rg:clvaryonvg(2.639):datavg[updatefs:637] : CuAt label attribute for origlogBlv +epprd_rg:clvaryonvg(2.639):datavg[updatefs:639] clodmget -q 'name = origlogBlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.643):datavg[updatefs:639] CuAt_label=/oracle/EPP/origlogB +epprd_rg:clvaryonvg(2.644):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.645):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.648):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.648):datavg[updatefs:657] [[ -z /oracle/EPP/origlogB ]] +epprd_rg:clvaryonvg(2.648):datavg[updatefs:657] [[ /oracle/EPP/origlogB == None ]] +epprd_rg:clvaryonvg(2.648):datavg[updatefs:665] [[ /oracle/EPP/origlogB == /oracle/EPP/origlogB ]] +epprd_rg:clvaryonvg(2.648):datavg[updatefs:665] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:clvaryonvg(2.648):datavg[updatefs:685] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:clvaryonvg(2.648):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.649):datavg[updatefs:600] clodmget -q 'name = mirrlogAlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.652):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.652):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.652):datavg[updatefs:607] /usr/sbin/getlvcb -f mirrlogAlv +epprd_rg:clvaryonvg(2.669):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.669):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.669):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.669):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.669):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.669):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.669):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.669):datavg[updatefs:623] : Label and file system type from LVCB on disk for mirrlogAlv +epprd_rg:clvaryonvg(2.670):datavg[updatefs:625] getlvcb -T -A mirrlogAlv +epprd_rg:clvaryonvg(2.670):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.674):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.677):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.679):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest 
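
The recurring getlvcb pipeline at updatefs:625 is worth unpacking. getlvcb -T -A prints one "attribute = value" line per LVCB field; egrep keeps only the label and type lines; paste -s - - joins that pair into a single record; and because the last stage of a ksh pipeline runs in the current shell, read can split the record straight into lvcb_label and lvcb_type. Standalone, with an example LV:

    lv=origlogBlv
    LC_ALL=C getlvcb -T -A $lv |
        egrep -w 'label =|type =' |    # e.g. " label = /oracle/EPP/origlogB" and " type = jfs2"
        paste -s - - |                 # join the two lines into one
        read skip skip lvcb_label skip skip lvcb_type rest
    print -- "$lvcb_label $lvcb_type"  # -> /oracle/EPP/origlogB jfs2
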
+epprd_rg:clvaryonvg(2.691):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.691):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.691):datavg[updatefs:632] : Mount point in /etc/filesystems for mirrlogAlv +epprd_rg:clvaryonvg(2.693):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/mirrlogAlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.695):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.697):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.700):datavg[updatefs:634] fs_mount_point=/oracle/EPP/mirrlogA +epprd_rg:clvaryonvg(2.700):datavg[updatefs:637] : CuAt label attribute for mirrlogAlv +epprd_rg:clvaryonvg(2.700):datavg[updatefs:639] clodmget -q 'name = mirrlogAlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.703):datavg[updatefs:639] CuAt_label=/oracle/EPP/mirrlogA +epprd_rg:clvaryonvg(2.705):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.706):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.709):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.709):datavg[updatefs:657] [[ -z /oracle/EPP/mirrlogA ]] +epprd_rg:clvaryonvg(2.709):datavg[updatefs:657] [[ /oracle/EPP/mirrlogA == None ]] +epprd_rg:clvaryonvg(2.709):datavg[updatefs:665] [[ /oracle/EPP/mirrlogA == /oracle/EPP/mirrlogA ]] +epprd_rg:clvaryonvg(2.709):datavg[updatefs:665] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:clvaryonvg(2.709):datavg[updatefs:685] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:clvaryonvg(2.710):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.710):datavg[updatefs:600] clodmget -q 'name = mirrlogBlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.713):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.713):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.713):datavg[updatefs:607] /usr/sbin/getlvcb -f mirrlogBlv +epprd_rg:clvaryonvg(2.730):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.730):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.730):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.730):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.730):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.730):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.730):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.730):datavg[updatefs:623] : Label and file system type from LVCB on disk for mirrlogBlv +epprd_rg:clvaryonvg(2.731):datavg[updatefs:625] getlvcb -T -A mirrlogBlv +epprd_rg:clvaryonvg(2.731):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.735):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.738):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.740):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.752):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.752):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.752):datavg[updatefs:632] : Mount point in /etc/filesystems for mirrlogBlv +epprd_rg:clvaryonvg(2.754):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/mirrlogBlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.756):datavg[updatefs:634] head -1 
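
Before any of that comparison work, each LV passes three guards, visible at updatefs:598-620. The jfs2 log device epprdaloglv falls out at the third guard a little further below (its fs_info is blank), which is why the loop simply continues past it. The guards, sketched as a loop body over example LV names from this trace:

    for lv in sapdata1lv epprdaloglv; do
        # 1. raw logical volumes carry no file system to update
        [[ -n $(clodmget -q "name = $lv and attribute = type and value = raw" -f value -n CuAt) ]] &&
            continue
        # 2. an unreadable LVCB gives nothing trustworthy to compare against
        fs_info=$(/usr/sbin/getlvcb -f $lv) || continue
        # 3. an all-blank vfs record (e.g. a jfs2 log device) means no file system
        [[ $fs_info == *([[:space:]]) ]] && continue
        :   # label/type/mount-point cross-checks from the sketches above go here
    done
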
+epprd_rg:clvaryonvg(2.758):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.761):datavg[updatefs:634] fs_mount_point=/oracle/EPP/mirrlogB +epprd_rg:clvaryonvg(2.761):datavg[updatefs:637] : CuAt label attribute for mirrlogBlv +epprd_rg:clvaryonvg(2.761):datavg[updatefs:639] clodmget -q 'name = mirrlogBlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.765):datavg[updatefs:639] CuAt_label=/oracle/EPP/mirrlogB +epprd_rg:clvaryonvg(2.766):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.767):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.771):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.771):datavg[updatefs:657] [[ -z /oracle/EPP/mirrlogB ]] +epprd_rg:clvaryonvg(2.771):datavg[updatefs:657] [[ /oracle/EPP/mirrlogB == None ]] +epprd_rg:clvaryonvg(2.771):datavg[updatefs:665] [[ /oracle/EPP/mirrlogB == /oracle/EPP/mirrlogB ]] +epprd_rg:clvaryonvg(2.771):datavg[updatefs:665] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:clvaryonvg(2.771):datavg[updatefs:685] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:clvaryonvg(2.771):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.771):datavg[updatefs:600] clodmget -q 'name = epprdaloglv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.774):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.774):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.774):datavg[updatefs:607] /usr/sbin/getlvcb -f epprdaloglv +epprd_rg:clvaryonvg(2.792):datavg[updatefs:607] fs_info=' ' +epprd_rg:clvaryonvg(2.792):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.792):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.792):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.792):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.792):datavg[updatefs:618] [[ -z ' ' ]] +epprd_rg:clvaryonvg(2.792):datavg[updatefs:618] [[ ' ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.792):datavg[updatefs:620] continue +epprd_rg:clvaryonvg(2.792):datavg[1641] : At this point, the volume should be varied on, so get the current +epprd_rg:clvaryonvg(2.792):datavg[1642] : timestamp if needed +epprd_rg:clvaryonvg(2.792):datavg[1644] vgdatimestamps +epprd_rg:clvaryonvg(2.792):datavg[vgdatimestamps:201] PS4_FUNC=vgdatimestamps +epprd_rg:clvaryonvg(2.792):datavg[vgdatimestamps:201] typeset PS4_FUNC +epprd_rg:clvaryonvg(2.792):datavg[vgdatimestamps:202] [[ high == high ]] +epprd_rg:clvaryonvg(2.792):datavg[vgdatimestamps:202] set -x +epprd_rg:clvaryonvg(2.792):datavg[vgdatimestamps:203] set -u +epprd_rg:clvaryonvg(2.792):datavg[vgdatimestamps:206] : See what timestamp LVM has recorded from the last time it checked +epprd_rg:clvaryonvg(2.792):datavg[vgdatimestamps:207] : the disks +epprd_rg:clvaryonvg(2.792):datavg[vgdatimestamps:209] /usr/sbin/getlvodm -T 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(2.793):datavg[vgdatimestamps:209] 2> /dev/null +epprd_rg:clvaryonvg(2.795):datavg[vgdatimestamps:209] TS_FROM_ODM=63d4e5ad2e52f576 +epprd_rg:clvaryonvg(2.795):datavg[vgdatimestamps:212] : Check to see if HACMP is maintaining a timestamp for this volume group +epprd_rg:clvaryonvg(2.795):datavg[vgdatimestamps:213] : Needed for some older volume groups +epprd_rg:clvaryonvg(2.795):datavg[vgdatimestamps:215] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.tstamp ]] +epprd_rg:clvaryonvg(2.795):datavg[vgdatimestamps:234] : Get the time stamp from the actual 
disk +epprd_rg:clvaryonvg(2.795):datavg[vgdatimestamps:236] clvgdats /dev/datavg +epprd_rg:clvaryonvg(2.796):datavg[vgdatimestamps:236] 2> /dev/null +epprd_rg:clvaryonvg(2.805):datavg[vgdatimestamps:236] TS_FROM_DISK=63d4e5ad2e52f576 +epprd_rg:clvaryonvg(2.805):datavg[vgdatimestamps:237] clvgdats_rc=0 +epprd_rg:clvaryonvg(2.805):datavg[vgdatimestamps:238] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.806):datavg[vgdatimestamps:247] [[ -z 63d4e5ad2e52f576 ]] +epprd_rg:clvaryonvg(2.806):datavg[1645] [[ -z 63d4e5ad2e52f576 ]] +epprd_rg:clvaryonvg(2.806):datavg[1656] : Finally, leave the volume in the requested state - on or off +epprd_rg:clvaryonvg(2.806):datavg[1658] [[ FALSE == TRUE ]] +epprd_rg:clvaryonvg(2.806):datavg[1665] (( 0 == 0 )) +epprd_rg:clvaryonvg(2.806):datavg[1668] : Synchronize time stamps globally +epprd_rg:clvaryonvg(2.806):datavg[1670] cl_update_vg_odm_ts -o datavg +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[77] version=1.13 +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[121] o_flag='' +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[122] f_flag='' +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[123] getopts :of option +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[126] : Local timestamps should be good, since volume group was +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[127] : just varyied on or off +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[128] o_flag=TRUE +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[123] getopts :of option +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[142] shift 1 +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[144] vg_name=datavg +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[145] [[ -z datavg ]] +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[151] shift +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[152] node_list='' +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[153] /usr/es/sbin/cluster/utilities/cl_get_path all +epprd_rg:cl_update_vg_odm_ts(0.004):datavg[153] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin +epprd_rg:cl_update_vg_odm_ts(0.004):datavg[155] [[ -z '' ]] +epprd_rg:cl_update_vg_odm_ts(0.004):datavg[158] : Check to see if this update is necessary - some LVM levels automatically +epprd_rg:cl_update_vg_odm_ts(0.004):datavg[159] : update volume group timestamps clusterwide. 
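
vgdatimestamps establishes whether the ODM's cached picture of datavg is current by comparing two copies of the VGDA timestamp: the one LVM recorded in the ODM (getlvodm -T) and the one read from disk (PowerHA's clvgdats helper). Both read 63d4e5ad2e52f576 above, so nothing needs refreshing; what clvaryonvg does on a mismatch lies outside this excerpt. The comparison, reduced to its essentials:

    vgid=00c44af100004b00000001851e9dc053             # datavg's VG identifier, from the trace
    TS_FROM_ODM=$(/usr/sbin/getlvodm -T $vgid 2>/dev/null)
    TS_FROM_DISK=$(clvgdats /dev/datavg 2>/dev/null)  # timestamp from the on-disk VGDA
    if [[ -n $TS_FROM_DISK && $TS_FROM_ODM == $TS_FROM_DISK ]]; then
        print -- "ODM matches disk: $TS_FROM_DISK"    # the cached ODM data can be trusted
    fi
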
+epprd_rg:cl_update_vg_odm_ts(0.004):datavg[163] instfix -iqk IV74100 +epprd_rg:cl_update_vg_odm_ts(0.005):datavg[163] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.012):datavg[164] instfix -iqk IV74883 +epprd_rg:cl_update_vg_odm_ts(0.012):datavg[164] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.019):datavg[165] instfix -iqk IV74698 +epprd_rg:cl_update_vg_odm_ts(0.020):datavg[165] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.026):datavg[166] instfix -iqk IV74246 +epprd_rg:cl_update_vg_odm_ts(0.027):datavg[166] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.034):datavg[174] emgr -l -L IV74883 +epprd_rg:cl_update_vg_odm_ts(0.035):datavg[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.304):datavg[174] emgr -l -L IV74698 +epprd_rg:cl_update_vg_odm_ts(0.305):datavg[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.572):datavg[174] emgr -l -L IV74246 +epprd_rg:cl_update_vg_odm_ts(0.573):datavg[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.841):datavg[183] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_update_vg_odm_ts(0.841):datavg[184] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_update_vg_odm_ts(0.841):datavg[185] : 99.99.999.999 +epprd_rg:cl_update_vg_odm_ts(0.841):datavg[187] typeset -li V R M F +epprd_rg:cl_update_vg_odm_ts(0.841):datavg[188] typeset -Z2 V +epprd_rg:cl_update_vg_odm_ts(0.841):datavg[189] typeset -Z2 R +epprd_rg:cl_update_vg_odm_ts(0.841):datavg[190] typeset -Z3 M +epprd_rg:cl_update_vg_odm_ts(0.841):datavg[191] typeset -Z3 F +epprd_rg:cl_update_vg_odm_ts(0.841):datavg[192] lvm_lvl6=601008015 +epprd_rg:cl_update_vg_odm_ts(0.841):datavg[192] typeset -li lvm_lvl6 +epprd_rg:cl_update_vg_odm_ts(0.841):datavg[194] lvm_lvl7=701003046 +epprd_rg:cl_update_vg_odm_ts(0.841):datavg[194] typeset -li lvm_lvl7 +epprd_rg:cl_update_vg_odm_ts(0.841):datavg[195] VRMF=0 +epprd_rg:cl_update_vg_odm_ts(0.841):datavg[195] typeset -li VRMF +epprd_rg:cl_update_vg_odm_ts(0.841):datavg[198] : Here try and figure out what level of LVM is installed +epprd_rg:cl_update_vg_odm_ts(0.842):datavg[200] lslpp -lcqOr bos.rte.lvm +epprd_rg:cl_update_vg_odm_ts(0.846):datavg[200] cut -f3 -d: +epprd_rg:cl_update_vg_odm_ts(0.845):datavg[200] read V R M F +epprd_rg:cl_update_vg_odm_ts(0.845):datavg[200] IFS=. +epprd_rg:cl_update_vg_odm_ts(0.847):datavg[201] VRMF=0702005101 +epprd_rg:cl_update_vg_odm_ts(0.847):datavg[203] (( 7 == 6 && 702005101 >= 601008015 )) +epprd_rg:cl_update_vg_odm_ts(0.847):datavg[204] (( 702005101 >= 701003046 )) +epprd_rg:cl_update_vg_odm_ts(0.847):datavg[207] : LVM at a level in which timestamp update is unnecessary +epprd_rg:cl_update_vg_odm_ts(0.847):datavg[209] return 0 +epprd_rg:clvaryonvg(3.657):datavg[1674] : On successful varyon, clean up any files used to track errors with +epprd_rg:clvaryonvg(3.657):datavg[1675] : this volume group +epprd_rg:clvaryonvg(3.657):datavg[1677] rm -f /usr/es/sbin/cluster/etc/vg/datavg.desc /usr/es/sbin/cluster/etc/vg/datavg.replay /usr/es/sbin/cluster/etc/vg/datavg.perms /usr/es/sbin/cluster/etc/vg/datavg.tstamp /usr/es/sbin/cluster/etc/vg/datavg.fail +epprd_rg:clvaryonvg(3.660):datavg[1680] : Note that a sync has not been done on the volume group at this point. +epprd_rg:clvaryonvg(3.660):datavg[1681] : A sync is kicked off in cl_sync_vgs, once any filesystem mounts are +epprd_rg:clvaryonvg(3.660):datavg[1682] : complete. 
A sync at this time would interfere with the mounts +epprd_rg:clvaryonvg(3.660):datavg[1685] return 0 +epprd_rg:cl_activate_vgs(3.745):datavg[vgs_chk:103] ERRMSG=$'cl_set_vg_fence_height[126]: version @(#)10\t1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37\ncl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY)\ncl_set_vg_fence_height[214]: read(datavg, 16)\ncl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)\ncl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0))' +epprd_rg:cl_activate_vgs(3.745):datavg[vgs_chk:104] RC=0 +epprd_rg:cl_activate_vgs(3.745):datavg[vgs_chk:107] (( 0 == 1 || 0 == 20 )) +epprd_rg:cl_activate_vgs(3.745):datavg[vgs_chk:115] : exit status of clvaryonvg -n datavg: 0 +epprd_rg:cl_activate_vgs(3.745):datavg[vgs_chk:117] [[ -n $'cl_set_vg_fence_height[126]: version @(#)10\t1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37\ncl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY)\ncl_set_vg_fence_height[214]: read(datavg, 16)\ncl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)\ncl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0))' ]] +epprd_rg:cl_activate_vgs(3.745):datavg[vgs_chk:117] (( 0 != 1 )) +epprd_rg:cl_activate_vgs(3.745):datavg[vgs_chk:119] cl_echo 286 $'cl_activate_vgs: Successful clvaryonvg of datavg with message cl_set_vg_fence_height[126]: version @(#)10\t1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37\ncl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY)\ncl_set_vg_fence_height[214]: read(datavg, 16)\ncl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)\ncl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0)).' 
cl_activate_vgs datavg 'cl_set_vg_fence_height[126]:' version '@(#)10' 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 'cl_set_vg_fence_height[180]:' 'open(/usr/es/sbin/cluster/etc/vg/datavg.uuid,' 'O_RDONLY)' 'cl_set_vg_fence_height[214]:' 'read(datavg,' '16)' 'cl_set_vg_fence_height[237]:' 'close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)' 'cl_set_vg_fence_height[265]:' 'sfwSetFenceGroup(vg=datavg' uuid=ec2db4422261eae02091227fb9e53c88 height='rw(0))' Jan 28 2023 18:06:57 cl_activate_vgs: Successful clvaryonvg of datavg with message cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0)). +epprd_rg:cl_activate_vgs(3.763):datavg[vgs_chk:123] [[ 0 != 0 ]] +epprd_rg:cl_activate_vgs(3.763):datavg[vgs_chk:127] amlog_trace '' 'Activating Volume Group|datavg' +epprd_rg:cl_activate_vgs(3.763):datavg[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_vgs(3.764):datavg[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_vgs(3.789):datavg[amlog_trace:319] cltime +epprd_rg:cl_activate_vgs(3.791):datavg[amlog_trace:319] DATE=2023-01-28T18:06:57.276424 +epprd_rg:cl_activate_vgs(3.791):datavg[amlog_trace:320] echo '|2023-01-28T18:06:57.276424|INFO: Activating Volume Group|datavg' +epprd_rg:cl_activate_vgs(3.791):datavg[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_vgs(3.791):datavg[vgs_chk:132] echo datavg 0 +epprd_rg:cl_activate_vgs(3.791):datavg[vgs_chk:132] 1>> /tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs(3.792):datavg[vgs_chk:133] return 0 +epprd_rg:cl_activate_vgs:datavg[vgs_list:198] unset PS4_LOOP PS4_TIMER +epprd_rg:cl_activate_vgs[304] wait +epprd_rg:cl_activate_vgs[310] ALLNOERRVGS=All_nonerror_volume_groups +epprd_rg:cl_activate_vgs[311] cl_RMupdate resource_up All_nonerror_volume_groups cl_activate_vgs 2023-01-28T18:06:57.300046 2023-01-28T18:06:57.304357 +epprd_rg:cl_activate_vgs[318] [[ -f /tmp/_activate_vgs.tmp ]] +epprd_rg:cl_activate_vgs[320] grep ' 1' /tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs[329] rm -f /tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs[332] exit 0 +epprd_rg:process_resources[process_volume_groups:2584] RC=0 +epprd_rg:process_resources[process_volume_groups:2585] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[process_volume_groups:2598] (( 0 != 0 )) +epprd_rg:process_resources[process_volume_groups:2627] return 0 +epprd_rg:process_resources[process_volume_groups_main:2556] STAT=0 +epprd_rg:process_resources[process_volume_groups_main:2559] return 0 +epprd_rg:process_resources[3572] RC=0 +epprd_rg:process_resources[3573] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:06:57.322080 clrgpa +epprd_rg:clRGPA[+55] exit 0
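
One detail from cl_update_vg_odm_ts above deserves a note: the V, R, M and F fields of the installed bos.rte.lvm level are zero-padded to fixed widths (typeset -Z2/-Z3) so the whole version collapses into a single integer that compares reliably; 7.2.5.101 becomes 0702005101, which is >= 701003046, so the script concludes this LVM level already propagates VG timestamps clusterwide and returns without doing anything. The padding trick in isolation (the concatenation on the last line is inferred from the VRMF value in the trace):

    typeset -li V R M F
    typeset -Z2 V R                    # two digits each for version and release
    typeset -Z3 M F                    # three each for modification and fix
    typeset -li VRMF=0
    lslpp -lcqOr bos.rte.lvm | cut -f3 -d: | IFS=. read V R M F
    VRMF=${V}${R}${M}${F}              # e.g. 07 02 005 101 -> 0702005101
    (( VRMF >= 701003046 )) && print "LVM updates VG timestamps clusterwide itself"
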
+epprd_rg:process_resources[3329] eval JOB_TYPE=LOGREDO ACTION=ACQUIRE VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' +epprd_rg:process_resources[1] JOB_TYPE=LOGREDO +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] VOLUME_GROUPS=datavg +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ LOGREDO == RELEASE ]] +epprd_rg:process_resources[3360] [[ LOGREDO == ONLINE ]] +epprd_rg:process_resources[3634] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[3635] logredo_volume_groups +epprd_rg:process_resources[logredo_volume_groups:2745] PS4_FUNC=logredo_volume_groups +epprd_rg:process_resources[logredo_volume_groups:2745] typeset PS4_FUNC +epprd_rg:process_resources(4.734)[logredo_volume_groups:2746] PS4_TIMER=true +epprd_rg:process_resources(4.734)[logredo_volume_groups:2746] typeset PS4_TIMER +epprd_rg:process_resources(4.734)[logredo_volume_groups:2747] [[ high == high ]] +epprd_rg:process_resources(4.734)[logredo_volume_groups:2747] set -x +epprd_rg:process_resources(4.734)[logredo_volume_groups:2749] TMP_FILE=/var/hacmp/log/.process_resources_logredo.20185524 +epprd_rg:process_resources(4.734)[logredo_volume_groups:2749] export TMP_FILE +epprd_rg:process_resources(4.734)[logredo_volume_groups:2750] rm -f '/var/hacmp/log/.process_resources_logredo*' +epprd_rg:process_resources(4.737)[logredo_volume_groups:2752] STAT=0 +epprd_rg:process_resources(4.737)[logredo_volume_groups:2755] export GROUPNAME +epprd_rg:process_resources(4.738)[logredo_volume_groups:2757] get_list_head datavg +epprd_rg:process_resources(4.739)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(4.739)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(4.739)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(4.739)[get_list_head:60] set -x +epprd_rg:process_resources(4.740)[get_list_head:61] echo datavg +epprd_rg:process_resources(4.741)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(4.741)[get_list_head:61] IFS=: +epprd_rg:process_resources(4.742)[get_list_head:62] echo datavg +epprd_rg:process_resources(4.743)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(4.738)[logredo_volume_groups:2757] read LIST_OF_VOLUME_GROUPS_FOR_RG +epprd_rg:process_resources(4.749)[logredo_volume_groups:2758] get_list_tail datavg +epprd_rg:process_resources(4.749)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(4.749)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(4.749)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(4.749)[get_list_tail:68] set -x +epprd_rg:process_resources(4.750)[get_list_tail:69] echo datavg +epprd_rg:process_resources(4.751)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(4.751)[get_list_tail:69] IFS=: +epprd_rg:process_resources(4.751)[get_list_tail:70] echo +epprd_rg:process_resources(4.748)[logredo_volume_groups:2758] read VOLUME_GROUPS +epprd_rg:process_resources(4.753)[logredo_volume_groups:2761] : Run logredo on all JFS/JFS2 log devices to assure FS consistency 
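
The eval at process_resources:3329 is how the next unit of work arrives: clRGPA prints shell assignments (JOB_TYPE, ACTION, VOLUME_GROUPS, RESOURCE_GROUPS) and process_resources evaluates them under set -a so they are exported. The volume group list is colon-delimited per resource group and comma-delimited within a group, and get_list_head/get_list_tail peel it apart. Reconstructed from the trace at get_list_head:59-62 and get_list_tail:67-70:

    get_list_head()                    # first group's sublist, commas become spaces
    {
        echo "$1" | IFS=: read listhead listtail
        echo "$listhead" | tr , ' '
    }
    get_list_tail()                    # everything after the first colon
    {
        echo "$1" | IFS=: read listhead listtail
        echo "$listtail"
    }
    get_list_head datavg | read LIST_OF_VOLUME_GROUPS_FOR_RG   # -> datavg
    get_list_tail datavg | read VOLUME_GROUPS                  # -> empty, no further groups
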
+epprd_rg:process_resources(4.753)[logredo_volume_groups:2763] ALL_LVs='' +epprd_rg:process_resources(4.753)[logredo_volume_groups:2764] lv_all='' +epprd_rg:process_resources(4.753)[logredo_volume_groups:2765] mount_fs='' +epprd_rg:process_resources(4.753)[logredo_volume_groups:2766] fsck_check='' +epprd_rg:process_resources(4.753)[logredo_volume_groups:2767] MOUNTGUARD='' +epprd_rg:process_resources(4.753)[logredo_volume_groups:2768] FMMOUNT_OUT='' +epprd_rg:process_resources(4.753)[logredo_volume_groups:2769] FMMOUNT='' +epprd_rg:process_resources(4.754)[logredo_volume_groups:2772] lsvg -lL datavg +epprd_rg:process_resources(4.754)[logredo_volume_groups:2772] LC_ALL=C +epprd_rg:process_resources(4.757)[logredo_volume_groups:2772] tail +3 +epprd_rg:process_resources(4.758)[logredo_volume_groups:2772] 1>> /var/hacmp/log/.process_resources_logredo.20185524 +epprd_rg:process_resources(4.780)[logredo_volume_groups:2774] cat /var/hacmp/log/.process_resources_logredo.20185524 +epprd_rg:process_resources(4.782)[logredo_volume_groups:2774] awk '{print $1}' +epprd_rg:process_resources(4.787)[logredo_volume_groups:2774] ALL_LVs=$'epprdaloglv\nsaplv\nsapmntlv\noraclelv\nepplv\noraarchlv\nsapdata1lv\nsapdata2lv\nsapdata3lv\nsapdata4lv\nboardlv\noriglogAlv\noriglogBlv\nmirrlogAlv\nmirrlogBlv' +epprd_rg:process_resources(4.787)[logredo_volume_groups:2777] : Verify if any of the file system associated with volume group datavg +epprd_rg:process_resources(4.787)[logredo_volume_groups:2778] : is already mounted anywhere else in the cluster. +epprd_rg:process_resources(4.787)[logredo_volume_groups:2779] : If it is already mounted somewhere else, we dont want to continue +epprd_rg:process_resources(4.787)[logredo_volume_groups:2780] : here to avoid data corruption. +epprd_rg:process_resources(4.789)[logredo_volume_groups:2782] cat /var/hacmp/log/.process_resources_logredo.20185524 +epprd_rg:process_resources(4.792)[logredo_volume_groups:2782] grep -v N/A +epprd_rg:process_resources(4.794)[logredo_volume_groups:2782] awk '{print $1}' +epprd_rg:process_resources(4.799)[logredo_volume_groups:2782] lv_all=$'saplv\nsapmntlv\noraclelv\nepplv\noraarchlv\nsapdata1lv\nsapdata2lv\nsapdata3lv\nsapdata4lv\nboardlv\noriglogAlv\noriglogBlv\nmirrlogAlv\nmirrlogBlv' +epprd_rg:process_resources(4.799)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.799)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.801)[logredo_volume_groups:2789] lsfs -qc saplv +epprd_rg:process_resources(4.801)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.802)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.802)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/saplv' was found in /etc/filesystems. +epprd_rg:process_resources(4.804)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.808)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.808)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.808)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
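
A caveat on the MountGuard probe at logredo_volume_groups:2789: lsfs -qc prints a file system's attributes colon-separated, so translating ':' to newlines isolates a MountGuard token whose second field says whether concurrent-mount protection is enabled. The loop, however, hands lsfs logical volume names (saplv, sapmntlv, ...) where it apparently expects file system names, which is why every call above fails with "No record matching" and MOUNTGUARD stays empty for the whole run. With a real mount point the probe looks like:

    fs=/oracle/EPP/sapdata1            # example mount point; the trace passed LV names instead
    MOUNTGUARD=$(LC_ALL=C lsfs -qc $fs | tr : '\n' | grep -w MountGuard | cut '-d ' -f2)
    [[ $MOUNTGUARD == yes ]] && print "$fs is guarded against concurrent mounts"
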
+epprd_rg:process_resources(4.808)[logredo_volume_groups:2795] fsdb saplv +epprd_rg:process_resources(4.809)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.812)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.814)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.815)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.815)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.820)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.820)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.820)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.820)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.820)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.822)[logredo_volume_groups:2789] lsfs -qc sapmntlv +epprd_rg:process_resources(4.822)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.822)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.823)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapmntlv' was found in /etc/filesystems. +epprd_rg:process_resources(4.824)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.828)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.828)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.828)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.828)[logredo_volume_groups:2795] fsdb sapmntlv +epprd_rg:process_resources(4.829)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.834)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.836)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.836)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.836)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.841)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.841)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.841)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.841)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.841)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.843)[logredo_volume_groups:2789] lsfs -qc oraclelv +epprd_rg:process_resources(4.843)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.844)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.844)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/oraclelv' was found in /etc/filesystems. +epprd_rg:process_resources(4.846)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.850)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.850)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. 
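
The second probe drops into fsdb with a two-command here-document: su displays the superblock and q quits. The script then looks for the FM_MOUNT state, which per its own comment is set while the file system is mounted cleanly on some node; a result of yes (tested at logredo_volume_groups:2800) means the file system is live elsewhere and must not be touched. The xtrace output interleaves the pipeline stages, but the intent reduces to this sketch:

    lv=sapdata1lv                                 # example; the trace feeds fsdb the LV name
    print -- 'su\nq' | fsdb $lv > /tmp/fsdb.$$    # same two subcommands as the here-document
    FMMOUNT=$(grep -w FM_MOUNT /tmp/fsdb.$$ | awk '{ print $1 }')
    [[ $FMMOUNT == yes ]] && print "mounted cleanly elsewhere: leave $lv alone"
    rm -f /tmp/fsdb.$$
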
+epprd_rg:process_resources(4.850)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.850)[logredo_volume_groups:2795] fsdb oraclelv +epprd_rg:process_resources(4.851)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.854)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.856)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.856)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.857)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.861)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.862)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.862)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.862)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.862)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.864)[logredo_volume_groups:2789] lsfs -qc epplv +epprd_rg:process_resources(4.864)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.864)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.865)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/epplv' was found in /etc/filesystems. +epprd_rg:process_resources(4.866)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.870)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.870)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.870)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.870)[logredo_volume_groups:2795] fsdb epplv +epprd_rg:process_resources(4.871)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.874)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.876)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.877)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.877)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.882)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.882)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.882)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.882)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.882)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.884)[logredo_volume_groups:2789] lsfs -qc oraarchlv +epprd_rg:process_resources(4.884)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.885)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.885)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/oraarchlv' was found in /etc/filesystems. 
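
From here the same two probes repeat verbatim for each remaining logical volume in datavg. Their whole purpose, per the comment at logredo_volume_groups:2761, is to make it safe to replay the JFS/JFS2 intent log; the replay itself falls after this excerpt, but assuming the function proceeds normally it comes down to one command against the group's log device:

    # Assumed continuation, not shown above: replay committed transactions so
    # the file systems backed by this log are consistent before fsck/mount.
    /usr/sbin/logredo /dev/epprdaloglv
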
+epprd_rg:process_resources(4.886)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.890)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.890)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.890)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.890)[logredo_volume_groups:2795] fsdb oraarchlv +epprd_rg:process_resources(4.892)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.895)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.897)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.897)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.897)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.902)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.902)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.902)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.902)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.902)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.904)[logredo_volume_groups:2789] lsfs -qc sapdata1lv +epprd_rg:process_resources(4.905)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.905)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.905)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapdata1lv' was found in /etc/filesystems. +epprd_rg:process_resources(4.907)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.911)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.911)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.911)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.911)[logredo_volume_groups:2795] fsdb sapdata1lv +epprd_rg:process_resources(4.912)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.915)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.917)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.918)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.918)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.923)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.923)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.923)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.923)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.923)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
+epprd_rg:process_resources(4.925)[logredo_volume_groups:2789] lsfs -qc sapdata2lv +epprd_rg:process_resources(4.925)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.925)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.926)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapdata2lv' was found in /etc/filesystems. +epprd_rg:process_resources(4.927)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.931)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.931)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.931)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.931)[logredo_volume_groups:2795] fsdb sapdata2lv +epprd_rg:process_resources(4.932)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.936)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.938)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.938)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.938)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.943)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.943)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.943)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.943)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.943)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.945)[logredo_volume_groups:2789] lsfs -qc sapdata3lv +epprd_rg:process_resources(4.945)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.946)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.946)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapdata3lv' was found in /etc/filesystems. +epprd_rg:process_resources(4.947)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.951)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.951)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.951)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
+epprd_rg:process_resources(4.952)[logredo_volume_groups:2795] fsdb sapdata3lv +epprd_rg:process_resources(4.953)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.956)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.958)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.958)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.959)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.963)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.963)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.963)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.963)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.963)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.965)[logredo_volume_groups:2789] lsfs -qc sapdata4lv +epprd_rg:process_resources(4.966)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.966)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.967)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapdata4lv' was found in /etc/filesystems. +epprd_rg:process_resources(4.968)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.972)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.972)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.972)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.972)[logredo_volume_groups:2795] fsdb sapdata4lv +epprd_rg:process_resources(4.973)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.976)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.978)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.979)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.979)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.984)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.984)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.984)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.984)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.984)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.986)[logredo_volume_groups:2789] lsfs -qc boardlv +epprd_rg:process_resources(4.986)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.986)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.987)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/boardlv' was found in /etc/filesystems. +epprd_rg:process_resources(4.988)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.992)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.992)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:process_resources(4.992)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.992)[logredo_volume_groups:2795] fsdb boardlv +epprd_rg:process_resources(4.993)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.997)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.999)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.999)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.999)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.004)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.004)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.004)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.004)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.004)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.006)[logredo_volume_groups:2789] lsfs -qc origlogAlv +epprd_rg:process_resources(5.007)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.007)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.007)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/origlogAlv' was found in /etc/filesystems. +epprd_rg:process_resources(5.009)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.013)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.013)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.013)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.013)[logredo_volume_groups:2795] fsdb origlogAlv +epprd_rg:process_resources(5.014)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.017)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.019)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.020)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.020)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.025)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.025)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.025)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.025)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.025)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.027)[logredo_volume_groups:2789] lsfs -qc origlogBlv +epprd_rg:process_resources(5.027)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.027)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.028)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/origlogBlv' was found in /etc/filesystems. 
+epprd_rg:process_resources(5.029)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.033)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.033)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.033)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.033)[logredo_volume_groups:2795] fsdb origlogBlv +epprd_rg:process_resources(5.034)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.038)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.040)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.040)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.040)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.045)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.045)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.045)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.045)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.045)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.047)[logredo_volume_groups:2789] lsfs -qc mirrlogAlv +epprd_rg:process_resources(5.047)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.048)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.048)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/mirrlogAlv' was found in /etc/filesystems. +epprd_rg:process_resources(5.050)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.054)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.054)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.054)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.054)[logredo_volume_groups:2795] fsdb mirrlogAlv +epprd_rg:process_resources(5.055)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.058)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.060)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.061)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.061)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.066)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.066)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.066)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.066)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.066)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
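The probe repeated for each log LV in this stretch of the trace works in two steps, as the inline comments say: lsfs -qc reports whether the MountGuard attribute is set, and an fsdb superblock dump (subcommand su, then q to quit) is searched for FM_MOUNT, which flags the file system as mounted somewhere. Note that in this pass lsfs is handed bare LV names, which it apparently resolves against the working directory (hence the /var/hacmp/<lv> lookups); no such stanzas exist, so MOUNTGUARD and FMMOUNT stay empty and both checks fall through. A minimal ksh sketch of the probe, assuming an AIX node with JFS2 (the function name is introduced here for illustration only):

    # Return success when fs is MountGuard-protected and its superblock
    # says it is mounted somewhere; fs must be a mount point or device
    # that /etc/filesystems knows about.
    is_guarded_and_mounted()
    {
        typeset fs=$1 mountguard fmmount
        # lsfs -qc emits colon-separated attributes; isolate MountGuard.
        # Match yes* to tolerate trailing punctuation as captured in this
        # trace, e.g. 'yes)'.
        mountguard=$(LC_ALL=C lsfs -qc "$fs" 2>/dev/null | tr : '\n' |
            grep -w MountGuard | cut -d' ' -f2)
        # fsdb's su subcommand dumps the superblock; FM_MOUNT shows up
        # there when the file system is mounted on some node
        fmmount=$(print "su\nq" | fsdb "$fs" 2>/dev/null |
            grep -w FM_MOUNT | awk '{ print $1 }')
        [[ $mountguard == yes* && -n $fmmount ]]
    }

The same probe runs again further down in cl_activate_fs against the real mount points, where lsfs and fsdb do return data.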
+epprd_rg:process_resources(5.068)[logredo_volume_groups:2789] lsfs -qc mirrlogBlv +epprd_rg:process_resources(5.068)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.068)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.069)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/mirrlogBlv' was found in /etc/filesystems. +epprd_rg:process_resources(5.070)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.074)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.074)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.074)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.074)[logredo_volume_groups:2795] fsdb mirrlogBlv +epprd_rg:process_resources(5.075)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.078)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.080)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.081)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.081)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.086)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.086)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.086)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.086)[logredo_volume_groups:2814] comm_failure='' +epprd_rg:process_resources(5.086)[logredo_volume_groups:2815] rc_mount='' +epprd_rg:process_resources(5.086)[logredo_volume_groups:2816] [[ -n '' ]] +epprd_rg:process_resources(5.086)[logredo_volume_groups:2851] logdevs='' +epprd_rg:process_resources(5.086)[logredo_volume_groups:2852] HAVE_GEO='' +epprd_rg:process_resources(5.086)[logredo_volume_groups:2853] lslpp -l 'hageo.*' +epprd_rg:process_resources(5.087)[logredo_volume_groups:2853] 1> /dev/null 2>& 1 +epprd_rg:process_resources(5.090)[logredo_volume_groups:2854] lslpp -l 'geoRM.*' +epprd_rg:process_resources(5.091)[logredo_volume_groups:2854] 1> /dev/null 2>& 1 +epprd_rg:process_resources(5.094)[logredo_volume_groups:2874] pattern='jfs*log' +epprd_rg:process_resources(5.094)[logredo_volume_groups:2876] : Any device with the type as log should be added +epprd_rg:process_resources(5.094)[logredo_volume_groups:2882] odmget -q $'name = epprdaloglv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.097)[logredo_volume_groups:2882] [[ -n $'\nCuAt:\n\tname = "epprdaloglv"\n\tattribute = "type"\n\tvalue = "jfs2log"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.098)[logredo_volume_groups:2884] logdevs=' /dev/epprdaloglv' +epprd_rg:process_resources(5.098)[logredo_volume_groups:2882] odmget -q $'name = saplv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.101)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.101)[logredo_volume_groups:2882] odmget -q $'name = sapmntlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.105)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.105)[logredo_volume_groups:2882] odmget -q $'name = oraclelv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.108)[logredo_volume_groups:2882] [[ -n 
'' ]] +epprd_rg:process_resources(5.108)[logredo_volume_groups:2882] odmget -q $'name = epplv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.112)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.112)[logredo_volume_groups:2882] odmget -q $'name = oraarchlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.115)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.115)[logredo_volume_groups:2882] odmget -q $'name = sapdata1lv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.119)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.119)[logredo_volume_groups:2882] odmget -q $'name = sapdata2lv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.122)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.122)[logredo_volume_groups:2882] odmget -q $'name = sapdata3lv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.126)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.126)[logredo_volume_groups:2882] odmget -q $'name = sapdata4lv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.129)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.130)[logredo_volume_groups:2882] odmget -q $'name = boardlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.133)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.133)[logredo_volume_groups:2882] odmget -q $'name = origlogAlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.137)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.137)[logredo_volume_groups:2882] odmget -q $'name = origlogBlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.140)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.140)[logredo_volume_groups:2882] odmget -q $'name = mirrlogAlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.144)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.144)[logredo_volume_groups:2882] odmget -q $'name = mirrlogBlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.147)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.147)[logredo_volume_groups:2889] : JFS2 file systems can have inline logs where the log LV is the same as the FS LV. 
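The loop that just completed collects every dedicated log device: for each LV in the resource group it asks the CuAt ODM object class whether the LV's type attribute matches jfs*log, and appends /dev/<lv> on a hit (only epprdaloglv qualifies here). The comment above introduces the follow-on pass for inline logs, which is traced next. A condensed version of the dedicated-log collection (lv_list is an assumed input for this sketch):

    # Gather dedicated JFS/JFS2 log devices by ODM type lookup
    logdevs=""
    for lv in $lv_list
    do
        out=$(odmget -q "name = $lv and attribute = type and value like jfs*log" CuAt)
        [[ -n $out ]] && logdevs="$logdevs /dev/$lv"
    done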
+epprd_rg:process_resources(5.147)[logredo_volume_groups:2895] odmget $'-qname = epprdaloglv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.151)[logredo_volume_groups:2895] [[ -n '' ]] +epprd_rg:process_resources(5.151)[logredo_volume_groups:2895] odmget $'-qname = saplv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.154)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "saplv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.156)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.156)[logredo_volume_groups:2898] odmget -q 'name = saplv and attribute = label' CuAt +epprd_rg:process_resources(5.160)[logredo_volume_groups:2898] [[ -n /usr/sap ]] +epprd_rg:process_resources(5.162)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.162)[logredo_volume_groups:2900] grep -wp /dev/saplv /etc/filesystems +epprd_rg:process_resources(5.167)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.167)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.168)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/saplv ]] +epprd_rg:process_resources(5.168)[logredo_volume_groups:2895] odmget $'-qname = sapmntlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.171)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapmntlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.173)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.173)[logredo_volume_groups:2898] odmget -q 'name = sapmntlv and attribute = label' CuAt +epprd_rg:process_resources(5.177)[logredo_volume_groups:2898] [[ -n /sapmnt ]] +epprd_rg:process_resources(5.179)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.179)[logredo_volume_groups:2900] grep -wp /dev/sapmntlv /etc/filesystems +epprd_rg:process_resources(5.184)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.184)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.184)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapmntlv ]] +epprd_rg:process_resources(5.184)[logredo_volume_groups:2895] odmget $'-qname = oraclelv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.188)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "oraclelv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.190)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.190)[logredo_volume_groups:2898] odmget -q 'name = oraclelv and attribute = label' CuAt +epprd_rg:process_resources(5.194)[logredo_volume_groups:2898] [[ -n /oracle ]] +epprd_rg:process_resources(5.196)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.196)[logredo_volume_groups:2900] grep -wp /dev/oraclelv /etc/filesystems +epprd_rg:process_resources(5.201)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.201)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] 
+epprd_rg:process_resources(5.201)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/oraclelv ]] +epprd_rg:process_resources(5.201)[logredo_volume_groups:2895] odmget $'-qname = epplv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.205)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "epplv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.207)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.207)[logredo_volume_groups:2898] odmget -q 'name = epplv and attribute = label' CuAt +epprd_rg:process_resources(5.211)[logredo_volume_groups:2898] [[ -n /oracle/EPP ]] +epprd_rg:process_resources(5.213)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.213)[logredo_volume_groups:2900] grep -wp /dev/epplv /etc/filesystems +epprd_rg:process_resources(5.218)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.218)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.218)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/epplv ]] +epprd_rg:process_resources(5.218)[logredo_volume_groups:2895] odmget $'-qname = oraarchlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.222)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "oraarchlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.224)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.224)[logredo_volume_groups:2898] odmget -q 'name = oraarchlv and attribute = label' CuAt +epprd_rg:process_resources(5.228)[logredo_volume_groups:2898] [[ -n /oracle/EPP/oraarch ]] +epprd_rg:process_resources(5.230)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.230)[logredo_volume_groups:2900] grep -wp /dev/oraarchlv /etc/filesystems +epprd_rg:process_resources(5.235)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.235)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.235)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/oraarchlv ]] +epprd_rg:process_resources(5.235)[logredo_volume_groups:2895] odmget $'-qname = sapdata1lv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.239)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapdata1lv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.241)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.241)[logredo_volume_groups:2898] odmget -q 'name = sapdata1lv and attribute = label' CuAt +epprd_rg:process_resources(5.245)[logredo_volume_groups:2898] [[ -n /oracle/EPP/sapdata1 ]] +epprd_rg:process_resources(5.247)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.247)[logredo_volume_groups:2900] grep -wp /dev/sapdata1lv /etc/filesystems +epprd_rg:process_resources(5.252)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.252)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.252)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapdata1lv ]] 
+epprd_rg:process_resources(5.252)[logredo_volume_groups:2895] odmget $'-qname = sapdata2lv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.256)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapdata2lv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.258)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.258)[logredo_volume_groups:2898] odmget -q 'name = sapdata2lv and attribute = label' CuAt +epprd_rg:process_resources(5.262)[logredo_volume_groups:2898] [[ -n /oracle/EPP/sapdata2 ]] +epprd_rg:process_resources(5.264)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.264)[logredo_volume_groups:2900] grep -wp /dev/sapdata2lv /etc/filesystems +epprd_rg:process_resources(5.269)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.269)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.269)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapdata2lv ]] +epprd_rg:process_resources(5.269)[logredo_volume_groups:2895] odmget $'-qname = sapdata3lv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.273)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapdata3lv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.275)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.275)[logredo_volume_groups:2898] odmget -q 'name = sapdata3lv and attribute = label' CuAt +epprd_rg:process_resources(5.279)[logredo_volume_groups:2898] [[ -n /oracle/EPP/sapdata3 ]] +epprd_rg:process_resources(5.281)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.281)[logredo_volume_groups:2900] grep -wp /dev/sapdata3lv /etc/filesystems +epprd_rg:process_resources(5.286)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.286)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.286)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapdata3lv ]] +epprd_rg:process_resources(5.286)[logredo_volume_groups:2895] odmget $'-qname = sapdata4lv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.290)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapdata4lv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.292)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.292)[logredo_volume_groups:2898] odmget -q 'name = sapdata4lv and attribute = label' CuAt +epprd_rg:process_resources(5.296)[logredo_volume_groups:2898] [[ -n /oracle/EPP/sapdata4 ]] +epprd_rg:process_resources(5.298)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.298)[logredo_volume_groups:2900] grep -wp /dev/sapdata4lv /etc/filesystems +epprd_rg:process_resources(5.303)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.303)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.303)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapdata4lv ]] +epprd_rg:process_resources(5.303)[logredo_volume_groups:2895] odmget $'-qname = boardlv and 
\t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.307)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "boardlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.309)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.309)[logredo_volume_groups:2898] odmget -q 'name = boardlv and attribute = label' CuAt +epprd_rg:process_resources(5.313)[logredo_volume_groups:2898] [[ -n /board_org ]] +epprd_rg:process_resources(5.315)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.315)[logredo_volume_groups:2900] grep -wp /dev/boardlv /etc/filesystems +epprd_rg:process_resources(5.320)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.320)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.320)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/boardlv ]] +epprd_rg:process_resources(5.320)[logredo_volume_groups:2895] odmget $'-qname = origlogAlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.324)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "origlogAlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.326)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.326)[logredo_volume_groups:2898] odmget -q 'name = origlogAlv and attribute = label' CuAt +epprd_rg:process_resources(5.330)[logredo_volume_groups:2898] [[ -n /oracle/EPP/origlogA ]] +epprd_rg:process_resources(5.332)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.332)[logredo_volume_groups:2900] grep -wp /dev/origlogAlv /etc/filesystems +epprd_rg:process_resources(5.337)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.337)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.337)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/origlogAlv ]] +epprd_rg:process_resources(5.338)[logredo_volume_groups:2895] odmget $'-qname = origlogBlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.341)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "origlogBlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.343)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.343)[logredo_volume_groups:2898] odmget -q 'name = origlogBlv and attribute = label' CuAt +epprd_rg:process_resources(5.347)[logredo_volume_groups:2898] [[ -n /oracle/EPP/origlogB ]] +epprd_rg:process_resources(5.349)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.349)[logredo_volume_groups:2900] grep -wp /dev/origlogBlv /etc/filesystems +epprd_rg:process_resources(5.354)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.354)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.354)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/origlogBlv ]] +epprd_rg:process_resources(5.354)[logredo_volume_groups:2895] odmget $'-qname = mirrlogAlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.358)[logredo_volume_groups:2895] [[ 
-n $'\nCuAt:\n\tname = "mirrlogAlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.360)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.360)[logredo_volume_groups:2898] odmget -q 'name = mirrlogAlv and attribute = label' CuAt +epprd_rg:process_resources(5.364)[logredo_volume_groups:2898] [[ -n /oracle/EPP/mirrlogA ]] +epprd_rg:process_resources(5.366)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.366)[logredo_volume_groups:2900] grep -wp /dev/mirrlogAlv /etc/filesystems +epprd_rg:process_resources(5.371)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.371)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.371)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/mirrlogAlv ]] +epprd_rg:process_resources(5.372)[logredo_volume_groups:2895] odmget $'-qname = mirrlogBlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.375)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "mirrlogBlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.377)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.377)[logredo_volume_groups:2898] odmget -q 'name = mirrlogBlv and attribute = label' CuAt +epprd_rg:process_resources(5.381)[logredo_volume_groups:2898] [[ -n /oracle/EPP/mirrlogB ]] +epprd_rg:process_resources(5.383)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.383)[logredo_volume_groups:2900] grep -wp /dev/mirrlogBlv /etc/filesystems +epprd_rg:process_resources(5.388)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.388)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.388)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/mirrlogBlv ]] +epprd_rg:process_resources(5.389)[logredo_volume_groups:2910] : Remove any duplicates acquired so far +epprd_rg:process_resources(5.391)[logredo_volume_groups:2912] echo /dev/epprdaloglv +epprd_rg:process_resources(5.391)[logredo_volume_groups:2912] sort -u +epprd_rg:process_resources(5.391)[logredo_volume_groups:2912] tr ' ' '\n' +epprd_rg:process_resources(5.397)[logredo_volume_groups:2912] logdevs=/dev/epprdaloglv +epprd_rg:process_resources(5.397)[logredo_volume_groups:2915] : Run logredos in parallel to save time. +epprd_rg:process_resources(5.397)[logredo_volume_groups:2919] [[ -n '' ]] +epprd_rg:process_resources(5.397)[logredo_volume_groups:2944] : Run logredo only if the LV is closed. +epprd_rg:process_resources(5.397)[logredo_volume_groups:2946] awk '$1 ~ /^epprdaloglv$/ && $6 ~ /closed\// {print "CLOSED"}' /var/hacmp/log/.process_resources_logredo.20185524 +epprd_rg:process_resources(5.402)[logredo_volume_groups:2946] [[ -n CLOSED ]] +epprd_rg:process_resources(5.402)[logredo_volume_groups:2949] : Run logredo only if filesystem is not mounted on any of the node in the cluster. 
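In the inline-log pass above, each JFS2 LV's mount point is confirmed from its label attribute, then the log device is read out of the LV's /etc/filesystems stanza; AIX grep's -p flag prints the whole stanza (paragraph) containing the match, and the awk picks the value of its log line. An inline log would show up as INLINE or as the file-system device itself; here every stanza names /dev/epprdaloglv, so nothing new is added, and after the tr/sort -u pass the candidate list collapses to that one device. logredo is then gated on the log LV being closed; judging by the fields tested ($1 the LV name, $6 a state such as closed/syncd), the temporary file appears to hold lsvg -l output, though this excerpt does not show it being written. A sketch under those assumptions ($TMPFILE is a name introduced here):

    # Resolve one file system's log device from its /etc/filesystems stanza
    LOG=$(grep -wp "/dev/$lv" /etc/filesystems | awk '$1 ~ /log/ {printf $3}')
    if [[ $LOG == INLINE || $LOG == /dev/$lv ]]
    then
        logdevs="$logdevs /dev/$lv"   # inline log: redo against the FS LV itself
    fi

    # De-duplicate, then replay only closed log LVs ($TMPFILE assumed to
    # hold lsvg -l output, per the field layout tested above)
    logdevs=$(echo $logdevs | tr ' ' '\n' | sort -u)
    for dev in $logdevs
    do
        lv=${dev#/dev/}
        if [[ -n $(awk -v lv="$lv" '$1 == lv && $6 ~ /closed\// {print "CLOSED"}' "$TMPFILE") ]]
        then
            logredo "$dev" &          # redos run in parallel ...
        fi
    done
    wait                              # ... and are reaped here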
+epprd_rg:process_resources(5.402)[logredo_volume_groups:2951] [[ -z '' ]] +epprd_rg:process_resources(5.403)[logredo_volume_groups:2958] rm -f /var/hacmp/log/.process_resources_logredo.20185524 +epprd_rg:process_resources(5.403)[logredo_volume_groups:2953] logredo /dev/epprdaloglv +epprd_rg:process_resources(5.407)[logredo_volume_groups:2962] : Wait for the background logredos from the RGs +epprd_rg:process_resources(5.407)[logredo_volume_groups:2964] wait J2_LOGREDO:log redo processing for /dev/epprdaloglv +epprd_rg:process_resources(5.413)[logredo_volume_groups:2966] return 0 +epprd_rg:process_resources(5.413)[3324] true +epprd_rg:process_resources(5.413)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(5.413)[3328] set -a +epprd_rg:process_resources(5.413)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:06:58.021694 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(5.432)[3329] eval JOB_TYPE=FILESYSTEMS ACTION=ACQUIRE FILE_SYSTEMS='"/board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap"' RESOURCE_GROUPS='"epprd_rg' '"' FSCHECK_TOOLS='"fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck"' RECOVERY_METHODS='"sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential"' +epprd_rg:process_resources(5.433)[1] JOB_TYPE=FILESYSTEMS +epprd_rg:process_resources(5.433)[1] ACTION=ACQUIRE +epprd_rg:process_resources(5.433)[1] FILE_SYSTEMS=/board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap +epprd_rg:process_resources(5.433)[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources(5.433)[1] FSCHECK_TOOLS=fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck +epprd_rg:process_resources(5.433)[1] RECOVERY_METHODS=sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:process_resources(5.433)[3330] RC=0 +epprd_rg:process_resources(5.433)[3331] set +a +epprd_rg:process_resources(5.433)[3333] (( 0 != 0 )) +epprd_rg:process_resources(5.433)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(5.433)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(5.433)[3343] export GROUPNAME +epprd_rg:process_resources(5.433)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(5.433)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(5.433)[3360] [[ FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources(5.433)[3360] [[ FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources(5.433)[3482] process_file_systems ACQUIRE +epprd_rg:process_resources(5.433)[process_file_systems:2640] PS4_FUNC=process_file_systems +epprd_rg:process_resources(5.433)[process_file_systems:2640] typeset PS4_FUNC +epprd_rg:process_resources(5.433)[process_file_systems:2641] [[ high == high ]] +epprd_rg:process_resources(5.433)[process_file_systems:2641] set -x 
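The hand-off visible above is how process_resources gets each unit of work: clRGPA prints a line of NAME=VALUE assignments, and the script evals it between set -a and set +a so that JOB_TYPE, ACTION, FILE_SYSTEMS, RESOURCE_GROUPS and friends are all exported into the environment of whatever helper runs next (here cl_activate_fs, via process_file_systems ACQUIRE). A stripped-down sketch of that dispatch, assuming clRGPA writes the assignments to stdout; the terminating reply is not visible in this excerpt:

    while true
    do
        set -a                 # auto-export everything the eval assigns
        eval $(clRGPA)         # e.g. JOB_TYPE=FILESYSTEMS ACTION=ACQUIRE ...
        set +a
        case $JOB_TYPE in
        FILESYSTEMS)
            [[ $ACTION == ACQUIRE ]] && process_file_systems ACQUIRE
            ;;
        *)
            break              # placeholder: the real loop keys off clRGPA's
            ;;                 # end-of-work reply, which this excerpt omits
        esac
    done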
+epprd_rg:process_resources(5.433)[process_file_systems:2643] STAT=0 +epprd_rg:process_resources(5.433)[process_file_systems:2645] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources(5.433)[process_file_systems:2647] cl_activate_fs +epprd_rg:cl_activate_fs[819] version=1.1.8.5 +epprd_rg:cl_activate_fs[823] : Check for mounting OEM file systems +epprd_rg:cl_activate_fs[825] OEM_FS=false +epprd_rg:cl_activate_fs[826] (( 0 != 0 )) +epprd_rg:cl_activate_fs[832] STATUS=0 +epprd_rg:cl_activate_fs[832] typeset -li STATUS +epprd_rg:cl_activate_fs[833] EMULATE=REAL +epprd_rg:cl_activate_fs[836] : The environment variable MOUNT_WLMCNTRL_SELFMANAGE is referred inside mount. +epprd_rg:cl_activate_fs[837] : If this variable is set, few calls to wlmcntrl are skipped inside mount, which +epprd_rg:cl_activate_fs[838] : offers performance benefits. Hence we will export this variable if it is set +epprd_rg:cl_activate_fs[839] : in /etc/environment. +epprd_rg:cl_activate_fs[841] grep -w ^MOUNT_WLMCNTRL_SELFMANAGE /etc/environment +epprd_rg:cl_activate_fs[841] export eval +epprd_rg:cl_activate_fs[843] [[ -n FILESYSTEMS ]] +epprd_rg:cl_activate_fs[843] [[ FILESYSTEMS != GROUP ]] +epprd_rg:cl_activate_fs[846] : If JOB_TYPE is set, and it does not equal to GROUP, then +epprd_rg:cl_activate_fs[847] : we are processing for process_resources, which passes requests +epprd_rg:cl_activate_fs[848] : associaed with multiple resource groups through environment variables +epprd_rg:cl_activate_fs[850] activate_fs_process_resources +epprd_rg:cl_activate_fs[activate_fs_process_resources:716] [[ high == high ]] +epprd_rg:cl_activate_fs[activate_fs_process_resources:716] set -x +epprd_rg:cl_activate_fs[activate_fs_process_resources:718] ERRSTATUS=0 +epprd_rg:cl_activate_fs[activate_fs_process_resources:718] typeset -i ERRSTATUS +epprd_rg:cl_activate_fs[activate_fs_process_resources:719] RC=0 +epprd_rg:cl_activate_fs[activate_fs_process_resources:719] typeset -li RC +epprd_rg:cl_activate_fs[activate_fs_process_resources:742] export GROUPNAME +epprd_rg:cl_activate_fs[activate_fs_process_resources:745] : Get the file systems, recovery tool and procedure for this +epprd_rg:cl_activate_fs[activate_fs_process_resources:746] : resource group +epprd_rg:cl_activate_fs[activate_fs_process_resources:748] print /board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap +epprd_rg:cl_activate_fs[activate_fs_process_resources:748] read _RG_FILE_SYSTEMS FILE_SYSTEMS +epprd_rg:cl_activate_fs[activate_fs_process_resources:748] IFS=: +epprd_rg:cl_activate_fs[activate_fs_process_resources:749] print fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck +epprd_rg:cl_activate_fs[activate_fs_process_resources:749] read _RG_FSCHECK_TOOLS FSCHECK_TOOLS +epprd_rg:cl_activate_fs[activate_fs_process_resources:749] IFS=: +epprd_rg:cl_activate_fs[activate_fs_process_resources:750] print sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:cl_activate_fs[activate_fs_process_resources:750] read _RG_RECOVERY_METHODS RECOVERY_METHODS +epprd_rg:cl_activate_fs[activate_fs_process_resources:750] IFS=: +epprd_rg:cl_activate_fs[activate_fs_process_resources:753] : Since all file systems in a resource group use the same recovery 
+epprd_rg:cl_activate_fs[activate_fs_process_resources:754] : method and recovery means, just pick up the first one in the list +epprd_rg:cl_activate_fs[activate_fs_process_resources:756] print fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck +epprd_rg:cl_activate_fs[activate_fs_process_resources:756] read FSCHECK_TOOL rest +epprd_rg:cl_activate_fs[activate_fs_process_resources:756] IFS=, +epprd_rg:cl_activate_fs[activate_fs_process_resources:757] print sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:cl_activate_fs[activate_fs_process_resources:757] read RECOVERY_METHOD rest +epprd_rg:cl_activate_fs[activate_fs_process_resources:757] IFS=, +epprd_rg:cl_activate_fs[activate_fs_process_resources:760] : If there are any unmounted file systems for this resource group, go +epprd_rg:cl_activate_fs[activate_fs_process_resources:761] : recover and mount them. +epprd_rg:cl_activate_fs[activate_fs_process_resources:763] [[ -n /board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap ]] +epprd_rg:cl_activate_fs[activate_fs_process_resources:765] IFS=, +epprd_rg:cl_activate_fs[activate_fs_process_resources:765] set -- /board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap +epprd_rg:cl_activate_fs[activate_fs_process_resources:765] print /board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap +epprd_rg:cl_activate_fs[activate_fs_process_resources:765] RG_FILE_SYSTEMS='/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[activate_fs_process_resources:766] activate_fs_process_group sequential fsck '/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[activate_fs_process_group:362] PS4_LOOP='' +epprd_rg:cl_activate_fs[activate_fs_process_group:362] typeset PS4_LOOP +epprd_rg:cl_activate_fs[activate_fs_process_group:363] [[ high == high ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:363] set -x +epprd_rg:cl_activate_fs[activate_fs_process_group:365] typeset RECOVERY_METHOD FSCHECK_TOOL FILESYSTEMS +epprd_rg:cl_activate_fs[activate_fs_process_group:366] STATUS=0 +epprd_rg:cl_activate_fs[activate_fs_process_group:366] typeset -i STATUS +epprd_rg:cl_activate_fs[activate_fs_process_group:368] RECOVERY_METHOD=sequential +epprd_rg:cl_activate_fs[activate_fs_process_group:369] FSCHECK_TOOL=fsck +epprd_rg:cl_activate_fs[activate_fs_process_group:370] shift 2 +epprd_rg:cl_activate_fs[activate_fs_process_group:371] FILESYSTEMS='/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch 
/oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[activate_fs_process_group:372] comm_failure='' +epprd_rg:cl_activate_fs[activate_fs_process_group:372] typeset comm_failure +epprd_rg:cl_activate_fs[activate_fs_process_group:373] rc_mount='' +epprd_rg:cl_activate_fs[activate_fs_process_group:373] typeset rc_mount +epprd_rg:cl_activate_fs[activate_fs_process_group:376] : Filter out duplicates, and file systems which are already mounted +epprd_rg:cl_activate_fs[activate_fs_process_group:378] mounts_to_do '/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[mounts_to_do:283] tomount='/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[mounts_to_do:283] typeset tomount +epprd_rg:cl_activate_fs[mounts_to_do:286] : Get most current list of mounted filesystems +epprd_rg:cl_activate_fs[mounts_to_do:288] mount +epprd_rg:cl_activate_fs[mounts_to_do:288] 2> /dev/null +epprd_rg:cl_activate_fs[mounts_to_do:288] paste -s - +epprd_rg:cl_activate_fs[mounts_to_do:288] awk '$3 ~ /jfs2*$/ {print $2}' +epprd_rg:cl_activate_fs[mounts_to_do:288] mounted=$'/\t/usr\t/var\t/tmp\t/home\t/admin\t/opt\t/var/adm/ras/livedump\t/ptf' +epprd_rg:cl_activate_fs[mounts_to_do:288] typeset mounted +epprd_rg:cl_activate_fs[mounts_to_do:291] shift +epprd_rg:cl_activate_fs[mounts_to_do:294] typeset -A mountedArray tomountArray +epprd_rg:cl_activate_fs[mounts_to_do:295] typeset fs +epprd_rg:cl_activate_fs[mounts_to_do:298] : Create an associative array for each list, which +epprd_rg:cl_activate_fs[mounts_to_do:299] : has the side effect of dropping any duplicates +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/usr]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/var]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/tmp]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/home]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/admin]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/opt]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/var/adm/ras/livedump]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/ptf]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/board_org]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/mirrlogA]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/mirrlogB]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/oraarch]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/origlogA]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/origlogB]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/sapdata1]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/sapdata2]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/sapdata3]=1 
+epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/sapdata4]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/sapmnt]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/usr/sap]=1 +epprd_rg:cl_activate_fs[mounts_to_do:310] mounted='' +epprd_rg:cl_activate_fs[mounts_to_do:311] tomount='' +epprd_rg:cl_activate_fs[mounts_to_do:314] : expand fs from all tomountArray subscript names +epprd_rg:cl_activate_fs[mounts_to_do:316] set +u +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:329] : Print all subscript names which are all remaining mount +epprd_rg:cl_activate_fs[mounts_to_do:330] : points which have to be mounted +epprd_rg:cl_activate_fs[mounts_to_do:332] print /board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap +epprd_rg:cl_activate_fs[mounts_to_do:332] sort -u +epprd_rg:cl_activate_fs[mounts_to_do:332] tr ' ' '\n' +epprd_rg:cl_activate_fs[mounts_to_do:334] set -u +epprd_rg:cl_activate_fs[activate_fs_process_group:378] FILESYSTEMS=$'/board_org\n/oracle\n/oracle/EPP\n/oracle/EPP/mirrlogA\n/oracle/EPP/mirrlogB\n/oracle/EPP/oraarch\n/oracle/EPP/origlogA\n/oracle/EPP/origlogB\n/oracle/EPP/sapdata1\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata4\n/sapmnt\n/usr/sap' +epprd_rg:cl_activate_fs[activate_fs_process_group:379] [[ -z $'/board_org\n/oracle\n/oracle/EPP\n/oracle/EPP/mirrlogA\n/oracle/EPP/mirrlogB\n/oracle/EPP/oraarch\n/oracle/EPP/origlogA\n/oracle/EPP/origlogB\n/oracle/EPP/sapdata1\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata4\n/sapmnt\n/usr/sap' ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:385] : Get unique temporary file names by using the resource group and the +epprd_rg:cl_activate_fs[activate_fs_process_group:386] : current process ID +epprd_rg:cl_activate_fs[activate_fs_process_group:388] [[ -z epprd_rg ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:397] TMP_FILENAME=epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs[activate_fs_process_group:398] rm -f /tmp/epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs[activate_fs_process_group:401] : If FSCHECK_TOOL is null get from ODM +epprd_rg:cl_activate_fs[activate_fs_process_group:403] [[ -z fsck ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:408] print fsck +epprd_rg:cl_activate_fs[activate_fs_process_group:408] FSCHECK_TOOL=fsck +epprd_rg:cl_activate_fs[activate_fs_process_group:409] [[ fsck != fsck ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:416] : If RECOVERY_METHOD is null get from ODM +epprd_rg:cl_activate_fs[activate_fs_process_group:418] [[ -z sequential ]] 
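mounts_to_do, traced above, computes the set difference between the file systems to acquire and those already mounted, using two associative arrays: loading both lists as subscripts drops duplicates for free, every candidate whose subscript also appears in the mounted array is discarded (none is, here), and the surviving subscripts come back sorted. Note also the print ... | IFS=: read and IFS=, read pipelines earlier in cl_activate_fs: ksh runs the last element of a pipeline in the current shell, so the variables that read assigns survive the pipe. A ksh93 sketch of the set-difference idiom:

    # Return the mount points in $@ that are not currently mounted as jfs/jfs2
    mounts_to_do()
    {
        typeset -A mountedArray tomountArray
        typeset fs
        # associative-array subscripts are unique: duplicates vanish;
        # the while loop is the last pipeline element, so in ksh the
        # assignments persist in the current shell
        mount 2>/dev/null | awk '$3 ~ /jfs2*$/ {print $2}' | while read fs
        do
            mountedArray[$fs]=1
        done
        for fs in "$@"
        do
            tomountArray[$fs]=1
        done
        # drop candidates that are already mounted
        for fs in "${!tomountArray[@]}"
        do
            [[ ${mountedArray[$fs]-} == 1 ]] && unset "tomountArray[$fs]"
        done
        print -- "${!tomountArray[@]}" | tr ' ' '\n' | sort -u
    }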
+epprd_rg:cl_activate_fs[activate_fs_process_group:423] print sequential +epprd_rg:cl_activate_fs[activate_fs_process_group:423] RECOVERY_METHOD=sequential +epprd_rg:cl_activate_fs[activate_fs_process_group:424] [[ sequential != sequential ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:431] set -u +epprd_rg:cl_activate_fs[activate_fs_process_group:434] : If FSCHECK_TOOL is set to logredo, the logredo for each jfslog has +epprd_rg:cl_activate_fs[activate_fs_process_group:435] : already been done in get_disk_vg_fs, so we only need to do fsck check +epprd_rg:cl_activate_fs[activate_fs_process_group:436] : and recovery here before going on to do the mounts +epprd_rg:cl_activate_fs[activate_fs_process_group:438] [[ fsck == fsck ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:441] TOOL='/usr/sbin/fsck -f -p -o nologredo' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:445] PS4_LOOP=/board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:446] lsfs /board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:446] grep -w /board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:449] : Verify if any of the file system /board_org is already mounted anywhere +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] lsfs -qc /board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
+epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:463] fsdb /board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/board_org\n\nFile System Size:\t\t10485032\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t16384\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000009ffd28\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t91\n[10] s_agsize:\t\t0x00004000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0013ffa5\n \t\t s_fsckpxd.address:\t1310629\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'boardl\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5832\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000000b5\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t181\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f1\t[52] last unmounted:\t0x63d4e560\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/board_org\n\nFile System Size:\t\t10485032\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t16384\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000009ffd28\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t91\n[10] s_agsize:\t\t0x00004000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0013ffa5\n \t\t s_fsckpxd.address:\t1310629\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'boardl\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5832\t[33] 
s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000000b5\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t181\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f1\t[52] last unmounted:\t0x63d4e560\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/boardlv The current volume is: /dev/boardlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:445] PS4_LOOP=/oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:446] lsfs /oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:446] grep -w /oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:449] : Verify if any of the file system /oracle is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] lsfs -qc /oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
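The superblock dump above is where FM_MOUNT would appear if the file system were mounted: for /board_org the s_state field reads FM_CLEAN and the flag word lists J2_MOUNTGUARD (guarded) and J2_GROUPCOMMIT, so the FM_MOUNT grep comes back empty and the acquire may proceed. To eyeball just those state tokens instead of the whole dump, something like:

    # Pull only the mount-state tokens out of the fsdb superblock display
    print "su\nq" | fsdb /board_org 2>/dev/null |
        grep -Ew 'FM_CLEAN|FM_MOUNT|J2_MOUNTGUARD'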
+epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:463] fsdb /oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle\n\nFile System Size:\t\t41941352\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t65536\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000027ff968\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t211\n[10] s_agsize:\t\t0x00010000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x004fff2d\n \t\t s_fsckpxd.address:\t5242669\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'oracle\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5819\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000295\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t661\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f1\t[52] last unmounted:\t0x63d4e560\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle\n\nFile System Size:\t\t41941352\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t65536\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000027ff968\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t211\n[10] s_agsize:\t\t0x00010000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x004fff2d\n \t\t s_fsckpxd.address:\t5242669\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'oracle\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5819\t[33] s_time.tj_nsec:\t0x00000000\n[14] 
s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000295\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t661\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f1\t[52] last unmounted:\t0x63d4e560\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/oraclelv The current volume is: /dev/oraclelv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:446] lsfs /oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:446] grep -w /oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] lsfs -qc /oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:463] fsdb /oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP\n\nFile System Size:\t\t62912232\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t65536\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x0000000003bff6e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t291\n[10] s_agsize:\t\t0x00010000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0077fedd\n \t\t s_fsckpxd.address:\t7864029\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'epplv\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5824\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000003d5\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t981\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f1\t[52] last unmounted:\t0x63d4e560\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP\n\nFile System Size:\t\t62912232\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t65536\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x0000000003bff6e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t291\n[10] s_agsize:\t\t0x00010000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0077fedd\n \t\t s_fsckpxd.address:\t7864029\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] 
s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'epplv\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5824\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000003d5\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t981\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f1\t[52] last unmounted:\t0x63d4e560\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/epplv The current volume is: /dev/epplv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:446] lsfs /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:446] grep -w /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/mirrlogA is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
+epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] lsfs -qc /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:463] fsdb /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/mirrlogA\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'mirrlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5834\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f1\t[52] last unmounted:\t0x63d4e560\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/mirrlogA\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate 
blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'mirrlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5834\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f1\t[52] last unmounted:\t0x63d4e560\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/mirrlogAlv The current volume is: /dev/mirrlogAlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:446] lsfs /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:446] grep -w /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/mirrlogB is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:450] : else in the cluster. 
If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] lsfs -qc /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:463] fsdb /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/mirrlogB\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'mirrlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5835\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f1\t[52] last unmounted:\t0x63d4e55f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] 
unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/mirrlogB\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'mirrlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5835\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f1\t[52] last unmounted:\t0x63d4e55f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/mirrlogBlv The current volume is: /dev/mirrlogBlv Primary superblock is valid. 
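The trace above repeats one guard pattern for every file system in the resource group: lsfs -qc reports whether MountGuard protection is set, fsdb's 'su' subcommand dumps the JFS2 superblock so the s_state flags can be inspected, and only when FM_MOUNT is absent (the file system is not mounted anywhere in the cluster) does the script go on to fsck the logical volume. Note that the cut on the lsfs output captures the value as 'yes)' with a trailing parenthesis, so the test at line 469, [[ 'yes)' == yes ]], does not match and execution continues at line 473; here every superblock reports FM_CLEAN rather than FM_MOUNT, so FMMOUNT stays empty and each device is checked. A minimal standalone ksh sketch of that pattern follows; check_fs and its arguments are illustrative names, not part of PowerHA, and the heredoc in the trace (0<< \EOF ... su q ... EOF) is replaced by an equivalent print for readability:

    # Illustrative sketch of the per-file-system guard seen in the trace.
    check_fs()
    {
        typeset fs=$1 dev=$2            # mount point and its logical volume
        typeset mountguard fmmount_out fmmount

        # Line 457: MountGuard protection is reported among the colon-separated
        # characteristics printed by 'lsfs -qc'.
        mountguard=$(LC_ALL=C lsfs -qc "$fs" | tr : '\n' |
                     grep -w MountGuard | cut -d' ' -f2)

        # Line 463: dump the superblock ('su') and quit ('q'); the trace feeds
        # the same two subcommands to fsdb through a heredoc.
        fmmount_out=$(print "su\nq" | fsdb "$fs")

        # Line 467: FM_MOUNT in s_state means the file system is mounted
        # (cleanly) somewhere; mounting it again would risk data corruption.
        fmmount=$(echo "$fmmount_out" | awk '{ print $1 }' | grep -w FM_MOUNT)
        if [[ -n $fmmount ]]; then
            return 1                    # mounted elsewhere: leave it alone
        fi

        # Line 508: full check of the device, skipping JFS2 log replay.
        /usr/sbin/fsck -f -p -o nologredo "$dev"
    }

    check_fs /oracle/EPP/mirrlogB /dev/mirrlogBlv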
+epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:446] lsfs /oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:446] grep -w /oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/oraarch is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] lsfs -qc /oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
+epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:463] fsdb /oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/oraarch\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'oraarc\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d582e\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f1\t[52] last unmounted:\t0x63d4e55f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/oraarch\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'oraarc\'\n FM_CLEAN \t[32] 
s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d582e\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f1\t[52] last unmounted:\t0x63d4e55f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/oraarchlv The current volume is: /dev/oraarchlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:446] lsfs /oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:446] grep -w /oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/origlogA is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
+epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] lsfs -qc /oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:463] fsdb /oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/origlogA\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'origlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5832\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f1\t[52] last unmounted:\t0x63d4e55f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/origlogA\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate 
blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'origlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5832\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f1\t[52] last unmounted:\t0x63d4e55f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/origlogAlv The current volume is: /dev/origlogAlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:446] lsfs /oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:446] grep -w /oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/origlogB is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:450] : else in the cluster. 
If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] lsfs -qc /oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:463] fsdb /oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/origlogB\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'origlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5833\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f2\t[52] last unmounted:\t0x63d4e55f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] 
unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/origlogB\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'origlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5833\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f2\t[52] last unmounted:\t0x63d4e55f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/origlogBlv The current volume is: /dev/origlogBlv Primary superblock is valid. 
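Each fsck in this pass runs in the foreground because the test at line 503, [[ sequential == parallel ]], compares the already-expanded recovery method (presumably the resource group's file system recovery method, here 'sequential') against 'parallel'. The flags are the same for every logical volume: -f requests AIX's fast check, -p fixes minor problems without prompting, and -o nologredo suppresses replay of the JFS2 transaction log. A hedged ksh sketch of how the same loop could fan out when the method is parallel; FS_CHECK_METHOD and the device list are illustrative, not taken from the script:

    # Illustrative dispatch: foreground when sequential, background when parallel.
    FS_CHECK_METHOD=sequential          # assumed to come from the RG policy
    set -A devs /dev/origlogAlv /dev/origlogBlv /dev/sapdata1lv

    for dev in "${devs[@]}"
    do
        if [[ $FS_CHECK_METHOD == parallel ]]
        then
            /usr/sbin/fsck -f -p -o nologredo "$dev" &   # check concurrently
        else
            /usr/sbin/fsck -f -p -o nologredo "$dev"     # one at a time, as in this log
        fi
    done
    [[ $FS_CHECK_METHOD == parallel ]] && wait           # reap background checks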
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:446] lsfs /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:446] grep -w /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/sapdata1 is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] lsfs -qc /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:463] fsdb /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/sapdata1\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d582f\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f2\t[52] last unmounted:\t0x63d4e55f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/sapdata1\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] 
s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d582f\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f2\t[52] last unmounted:\t0x63d4e55f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapdata1lv The current volume is: /dev/sapdata1lv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:446] lsfs /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:446] grep -w /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/sapdata2 is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] lsfs -qc /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:463] fsdb /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/sapdata2\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5830\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f2\t[52] last unmounted:\t0x63d4e55f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/sapdata2\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate 
blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5830\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f2\t[52] last unmounted:\t0x63d4e55f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapdata2lv The current volume is: /dev/sapdata2lv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:446] lsfs /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:446] grep -w /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/sapdata3 is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:450] : else in the cluster. 
If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] lsfs -qc /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:463] fsdb /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/sapdata3\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5831\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f2\t[52] last unmounted:\t0x63d4e55e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] 
unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/sapdata3\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5831\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f2\t[52] last unmounted:\t0x63d4e55e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapdata3lv The current volume is: /dev/sapdata3lv Primary superblock is valid. 
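The sapdata1 through sapdata3 passes above, and the sapdata4, /sapmnt and /usr/sap passes that follow, repeat the same per-filesystem probe. The first half of it, the MountGuard lookup, condenses to a short ksh sketch (check_mountguard is a hypothetical name, not a function in the script):

    # Ask lsfs whether MountGuard protection is configured for a filesystem.
    # lsfs -qc prints colon-separated fields, and MountGuard appears inside a
    # parenthesised options list, which is why the trace captures the value
    # as 'yes)' with a trailing parenthesis instead of a bare 'yes'.
    check_mountguard()
    {
        typeset fs=$1
        typeset guard
        guard=$(LC_ALL=C lsfs -qc "$fs" | tr : '\n' | grep -w MountGuard | cut '-d ' -f2)
        print -- "$guard"
    }

That trailing parenthesis appears to matter here: the comparison at line 469, [[ 'yes)' == yes ]], is false for every filesystem in this trace, so the FM_MOUNT-based branch at line 473 is never taken and the script falls through to the unconditional fsck at line 508.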
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:446] lsfs /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:446] grep -w /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/sapdata4 is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:451] : we don't want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] lsfs -qc /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node.
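The fsdb step these comments introduce, run next for sapdata4 just as for each filesystem before it, is the second half of the probe. A sketch, again under an assumed helper name:

    # Dump the superblock with fsdb's 'su' subcommand and look for FM_MOUNT
    # among the s_state flags. A non-empty result would mean some node
    # already has the filesystem mounted, the data-corruption case the
    # comments warn about. Every dump in this log shows FM_CLEAN instead,
    # so the grep comes back empty and activation proceeds.
    fs_marked_mounted()
    {
        typeset fs=$1
        typeset fmmount
        fmmount=$(print 'su\nq' | fsdb "$fs" | grep -w FM_MOUNT | awk '{ print $1 }')
        [[ -n $fmmount ]]
    }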
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:463] fsdb /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/sapdata4\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5831\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f2\t[52] last unmounted:\t0x63d4e55e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/sapdata4\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] 
s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5831\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f2\t[52] last unmounted:\t0x63d4e55e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapdata4lv The current volume is: /dev/sapdata4lv Primary superblock is valid. +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:445] PS4_LOOP=/sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:446] lsfs /sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:446] grep -w /sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:449] : Verify if any of the file system /sapmnt is already mounted anywhere +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] lsfs -qc /sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:463] fsdb /sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/sapmnt\n\nFile System Size:\t\t20970472\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t32768\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000013ffbe8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t131\n[10] s_agsize:\t\t0x00008000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0027ff7d\n \t\t s_fsckpxd.address:\t2621309\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapmnt\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5818\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000155\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t341\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f2\t[52] last unmounted:\t0x63d4e55e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/sapmnt\n\nFile System Size:\t\t20970472\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t32768\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000013ffbe8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t131\n[10] s_agsize:\t\t0x00008000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0027ff7d\n \t\t s_fsckpxd.address:\t2621309\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] 
s_fpack:\t\t\'sapmnt\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5818\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000155\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t341\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f2\t[52] last unmounted:\t0x63d4e55e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapmntlv The current volume is: /dev/sapmntlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:445] PS4_LOOP=/usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:446] lsfs /usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:446] grep -w /usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:449] : Verify if any of the file system /usr/sap is already mounted anywhere +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] lsfs -qc /usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:463] fsdb /usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/usr/sap\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'saplv\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5815\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f2\t[52] last unmounted:\t0x63d4e55e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/usr/sap\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000003\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] 
s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'saplv\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5815\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4e4f2\t[52] last unmounted:\t0x63d4e55e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/saplv The current volume is: /dev/saplv Primary superblock is valid. 
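That closes the pre-mount checks: every superblock dumped above shows FM_CLEAN, and each filesystem gets the same foreground fsck. For the six traced here the pass condenses to the loop below; the flag readings are mine, from the AIX fsck and JFS2 documentation, not from the script:

    # RECOVERY_METHOD is sequential in this cluster, so each check runs in
    # the foreground; parallel recovery would background them and let the
    # 'wait' that opens the mount phase collect the stragglers.
    for lv in sapdata1lv sapdata2lv sapdata3lv sapdata4lv sapmntlv saplv
    do
        # -f  fast check; -p  fix minor problems without prompting;
        # -o nologredo  skip JFS2 log replay rather than trust a log
        # written by the node the resource group is leaving.
        /usr/sbin/fsck -f -p -o nologredo /dev/$lv
    done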
+epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:513] : Allow any backgrounded fsck operations to finish +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:515] wait +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:519] : Now attempt to mount all the file systems +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:521] ALLFS=All_filesystems +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:522] cl_RMupdate resource_acquiring All_filesystems cl_activate_fs 2023-01-28T18:06:58.791299 2023-01-28T18:06:58.795724 +epprd_rg:cl_activate_fs(0.763):/usr/sap[activate_fs_process_group:524] PS4_TIMER=true +epprd_rg:cl_activate_fs(0.763):/usr/sap[activate_fs_process_group:524] typeset PS4_TIMER +epprd_rg:cl_activate_fs(0.763):/board_org[activate_fs_process_group:527] PS4_LOOP=/board_org +epprd_rg:cl_activate_fs(0.763):/board_org[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(0.763):/board_org[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(0.763):/board_org[activate_fs_process_group:540] fs_mount /board_org fsck epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:69] FS=/board_org +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:81] : Here check to see if the information in /etc/filesystems for /board_org +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:84] : point in /etc/filesystems. 
+epprd_rg:cl_activate_fs(0.763):/board_org[fs_mount:86] lsfs -c /board_org +epprd_rg:cl_activate_fs(0.764):/board_org[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(0.769):/board_org[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(0.764):/board_org[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(0.769):/board_org[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(0.769):/board_org[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(0.770):/board_org[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(0.764):/board_org[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(0.771):/board_org[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(0.772):/board_org[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(0.772):/board_org[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:100] LV_name=boardlv +epprd_rg:cl_activate_fs(0.773):/board_org[fs_mount:101] getlvcb -T -A boardlv +epprd_rg:cl_activate_fs(0.774):/board_org[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(0.774):/board_org[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.11 \n\t lvname = boardlv \n\t label = /board_org \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:47 2022\n \t time modified = Sat Jan 28 17:10:40 2023\n ' +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(0.793):/board_org[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(0.774):/board_org[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.11 \n\t lvname = boardlv \n\t label = /board_org \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:47 2022\n \t time modified = Sat Jan 28 17:10:40 2023\n ' +epprd_rg:cl_activate_fs(0.794):/board_org[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(0.796):/board_org[fs_mount:115] clodmget -q 'name = boardlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(0.799):/board_org[fs_mount:115] CuAt_label=/board_org +epprd_rg:cl_activate_fs(0.799):/board_org[fs_mount:118] : At this point, if things are working correctly, /board_org from /etc/filesystems +epprd_rg:cl_activate_fs(0.799):/board_org[fs_mount:119] : should match /board_org from CuAt ODM and /board_org from the LVCB +epprd_rg:cl_activate_fs(0.799):/board_org[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(0.799):/board_org[fs_mount:121] : were done in clvaryonvg. 
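fs_mount opens by cross-checking three copies of the filesystem label, and the commands just traced for /board_org condense to roughly the following (the mismatch message is illustrative; the script itself recovers nothing at this point because clvaryonvg already made its best effort):

    FS=/board_org
    # 1. /etc/filesystems, via lsfs: gives the device, hence the LV name.
    lsfs -c $FS | tail -1 | IFS=: read skip LV_dev_name vfs_type rest
    LV_name=${LV_dev_name##*/}                # boardlv from /dev/boardlv
    # 2. The on-disk LVCB, via getlvcb: its 'label =' field.
    getlvcb -T -A $LV_name | grep -w 'label =' | read skip skip LVCB_label
    # 3. The CuAt ODM copy of the label, via PowerHA's clodmget.
    CuAt_label=$(clodmget -q "name = $LV_name and attribute = label" -f value -n CuAt)
    # All three should name the mount point.
    [[ $LVCB_label != $FS || $CuAt_label != $FS ]] && print "label mismatch for $FS"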
+epprd_rg:cl_activate_fs(0.799):/board_org[fs_mount:123] [[ /board_org != /board_org ]] +epprd_rg:cl_activate_fs(0.799):/board_org[fs_mount:128] [[ /board_org != /board_org ]] +epprd_rg:cl_activate_fs(0.799):/board_org[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(0.799):/board_org[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(0.800):/board_org[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(0.819):/board_org[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(0.820):/board_org[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(0.820):/board_org[fs_mount:160] amlog_trace '' 'Activating Filesystem|/board_org' +epprd_rg:cl_activate_fs(0.820):/board_org[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(0.820):/board_org[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(0.844):/board_org[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(0.847):/board_org[amlog_trace:319] DATE=2023-01-28T18:06:58.880260 +epprd_rg:cl_activate_fs(0.847):/board_org[amlog_trace:320] echo '|2023-01-28T18:06:58.880260|INFO: Activating Filesystem|/board_org' +epprd_rg:cl_activate_fs(0.847):/board_org[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(0.847):/board_org[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(0.850):/board_org[fs_mount:162] : Try to mount filesystem /board_org at Jan 28 18:06:58.000 +epprd_rg:cl_activate_fs(0.850):/board_org[fs_mount:163] mount /board_org +epprd_rg:cl_activate_fs(0.862):/board_org[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(0.862):/board_org[fs_mount:219] : On successful mount of a JFS2 file system, 
engage mountguard, +epprd_rg:cl_activate_fs(0.862):/board_org[fs_mount:220] : if we are running on an AIX level that supports it +epprd_rg:cl_activate_fs(0.862):/board_org[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/board_org' +epprd_rg:cl_activate_fs(0.862):/board_org[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(0.863):/board_org[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(0.887):/board_org[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(0.889):/board_org[amlog_trace:319] DATE=2023-01-28T18:06:58.922588 +epprd_rg:cl_activate_fs(0.889):/board_org[amlog_trace:320] echo '|2023-01-28T18:06:58.922588|INFO: Activating Filesystems completed|/board_org' +epprd_rg:cl_activate_fs(0.889):/board_org[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(0.889):/board_org[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(0.889):/board_org[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(0.889):/board_org[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(0.889):/board_org[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(0.889):/board_org[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(0.889):/board_org[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(0.889):/board_org[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(0.889):/board_org[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(0.889):/board_org[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(0.889):/board_org[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(0.890):/board_org[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(0.891):/board_org[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(0.893):/board_org[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(0.893):/board_org[fs_mount:236] IFS=.
+epprd_rg:cl_activate_fs(0.893):/board_org[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(0.893):/board_org[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(0.893):/board_org[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(0.893):/board_org[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(0.893):/board_org[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(0.893):/board_org[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(0.774):/board_org[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.11 \n\t lvname = boardlv \n\t label = /board_org \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:47 2022\n \t time modified = Sat Jan 28 17:10:40 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(0.893):/board_org[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(0.893):/oracle[activate_fs_process_group:527] PS4_LOOP=/oracle +epprd_rg:cl_activate_fs(0.893):/oracle[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(0.893):/oracle[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(0.893):/oracle[activate_fs_process_group:540] fs_mount /oracle fsck epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(0.893):/oracle[fs_mount:69] FS=/oracle +epprd_rg:cl_activate_fs(0.893):/oracle[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(0.893):/oracle[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(0.893):/oracle[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(0.893):/oracle[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(0.893):/oracle[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(0.894):/oracle[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(0.894):/oracle[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(0.894):/oracle[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(0.894):/oracle[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(0.894):/oracle[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(0.894):/oracle[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(0.894):/oracle[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(0.894):/oracle[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(0.894):/oracle[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(0.894):/oracle[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(0.894):/oracle[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(0.894):/oracle[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle +epprd_rg:cl_activate_fs(0.894):/oracle[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(0.894):/oracle[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(0.894):/oracle[fs_mount:84] : point in /etc/filesystems. 
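The stretch that just completed for /board_org (fs_mount lines 224 to 255) is the MountGuard engagement gate: collapse the bos.rte.filesystem level into one comparable integer, then only touch the guard when the LVCB does not already record it, because the change rewrites the LVCB and bumps the VG timestamp. A sketch; the chfs line is my assumption of what the skipped branch would run:

    typeset -li V R M F
    typeset -Z2 R                    # zero-fill: 7.2.5.102 becomes 702005102
    typeset -Z3 M
    typeset -Z3 F
    typeset -li VRMF=0
    lslpp -lcqOr bos.rte.filesystem | cut -f3 -d: | IFS=. read V R M F
    VRMF=$V$R$M$F
    if (( (V == 6 && VRMF >= 601007000) || (V == 7 && VRMF >= 701001000) ))
    then
        # AIX level supports MountGuard; engage it once per filesystem.
        # Here the LVCB already shows mountguard=yes, so nothing runs.
        [[ $LVCB_info != *mountguard=yes* ]] && chfs -a mountguard=yes $FS
    fi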
+epprd_rg:cl_activate_fs(0.894):/oracle[fs_mount:86] lsfs -c /oracle +epprd_rg:cl_activate_fs(0.895):/oracle[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(0.900):/oracle[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(0.895):/oracle[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle:/dev/oraclelv:jfs2:::41943040:rw:no:no' +epprd_rg:cl_activate_fs(0.900):/oracle[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(0.900):/oracle[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(0.901):/oracle[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(0.895):/oracle[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle:/dev/oraclelv:jfs2:::41943040:rw:no:no' +epprd_rg:cl_activate_fs(0.901):/oracle[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(0.902):/oracle[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(0.903):/oracle[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:100] LV_name=oraclelv +epprd_rg:cl_activate_fs(0.904):/oracle[fs_mount:101] getlvcb -T -A oraclelv +epprd_rg:cl_activate_fs(0.905):/oracle[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(0.922):/oracle[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(0.905):/oracle[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.4 \n\t lvname = oraclelv \n\t label = /oracle \n\t machine id = 44AF14B00 \n\t number lps = 40 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:42 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(0.922):/oracle[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(0.922):/oracle[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(0.923):/oracle[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(0.905):/oracle[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.4 \n\t lvname = oraclelv \n\t label = /oracle \n\t machine id = 44AF14B00 \n\t number lps = 40 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:42 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(0.924):/oracle[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(0.926):/oracle[fs_mount:115] clodmget -q 'name = oraclelv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(0.929):/oracle[fs_mount:115] CuAt_label=/oracle +epprd_rg:cl_activate_fs(0.929):/oracle[fs_mount:118] : At this point, if things are working correctly, /oracle from /etc/filesystems +epprd_rg:cl_activate_fs(0.929):/oracle[fs_mount:119] : should match /oracle from CuAt ODM and /oracle from the LVCB +epprd_rg:cl_activate_fs(0.929):/oracle[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(0.929):/oracle[fs_mount:121] : were done in clvaryonvg. 
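Before mounting, fs_mount also makes the WPAR excursion traced for /board_org above and repeated for /oracle just below: clwparroot answers with the WPAR root path when the resource group carries a WPAR_NAME, and with nothing otherwise. A sketch; the WPAR-rooted mount is my assumption of the branch not taken in this log:

    WPAR_ROOT=$(clwparroot epprd_rg)   # empty throughout this log: no WPAR
    if [[ -n $WPAR_ROOT ]]
    then
        mount $WPAR_ROOT$FS            # assumed form of a WPAR-rooted mount
    else
        mount $FS                      # the branch actually taken, e.g. mount /oracle
    fi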
+epprd_rg:cl_activate_fs(0.929):/oracle[fs_mount:123] [[ /oracle != /oracle ]] +epprd_rg:cl_activate_fs(0.930):/oracle[fs_mount:128] [[ /oracle != /oracle ]] +epprd_rg:cl_activate_fs(0.930):/oracle[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(0.930):/oracle[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(0.930):/oracle[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(0.949):/oracle[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(0.949):/oracle[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(0.949):/oracle[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle' +epprd_rg:cl_activate_fs(0.949):/oracle[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(0.950):/oracle[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(0.974):/oracle[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(0.977):/oracle[amlog_trace:319] DATE=2023-01-28T18:06:59.010261 +epprd_rg:cl_activate_fs(0.977):/oracle[amlog_trace:320] echo '|2023-01-28T18:06:59.010261|INFO: Activating Filesystem|/oracle' +epprd_rg:cl_activate_fs(0.977):/oracle[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(0.977):/oracle[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(0.980):/oracle[fs_mount:162] : Try to mount filesystem /oracle at Jan 28 18:06:59.000 +epprd_rg:cl_activate_fs(0.980):/oracle[fs_mount:163] mount /oracle +epprd_rg:cl_activate_fs(0.991):/oracle[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(0.991):/oracle[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(0.991):/oracle[fs_mount:220] : if we are 
running on an AIX level that suppors it +epprd_rg:cl_activate_fs(0.991):/oracle[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle' +epprd_rg:cl_activate_fs(0.991):/oracle[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(0.992):/oracle[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.016):/oracle[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.019):/oracle[amlog_trace:319] DATE=2023-01-28T18:06:59.052102 +epprd_rg:cl_activate_fs(1.019):/oracle[amlog_trace:320] echo '|2023-01-28T18:06:59.052102|INFO: Activating Filesystems completed|/oracle' +epprd_rg:cl_activate_fs(1.019):/oracle[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.019):/oracle[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.019):/oracle[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.019):/oracle[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.019):/oracle[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.019):/oracle[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.019):/oracle[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.019):/oracle[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.019):/oracle[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.019):/oracle[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.019):/oracle[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.020):/oracle[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.021):/oracle[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.022):/oracle[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.023):/oracle[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(1.023):/oracle[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.023):/oracle[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.023):/oracle[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.023):/oracle[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.023):/oracle[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.023):/oracle[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(0.905):/oracle[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.4 \n\t lvname = oraclelv \n\t label = /oracle \n\t machine id = 44AF14B00 \n\t number lps = 40 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:42 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.023):/oracle[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[activate_fs_process_group:540] fs_mount /oracle/EPP fsck epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:69] FS=/oracle/EPP 
+epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.023):/oracle/EPP[fs_mount:86] lsfs -c /oracle/EPP +epprd_rg:cl_activate_fs(1.024):/oracle/EPP[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.029):/oracle/EPP[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.024):/oracle/EPP[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP:/dev/epplv:jfs2:::62914560:rw:no:no' +epprd_rg:cl_activate_fs(1.029):/oracle/EPP[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.029):/oracle/EPP[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.030):/oracle/EPP[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.024):/oracle/EPP[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP:/dev/epplv:jfs2:::62914560:rw:no:no' +epprd_rg:cl_activate_fs(1.031):/oracle/EPP[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.032):/oracle/EPP[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.032):/oracle/EPP[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.033):/oracle/EPP[fs_mount:100] LV_name=epplv +epprd_rg:cl_activate_fs(1.033):/oracle/EPP[fs_mount:101] getlvcb -T -A epplv +epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.052):/oracle/EPP[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.5 \n\t lvname = epplv \n\t label = /oracle/EPP \n\t machine id = 44AF14B00 \n\t number lps = 60 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(1.052):/oracle/EPP[fs_mount:102] RC=0 
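[Editor's note] The clwparroot excursion traced above for /oracle (and repeated below for every filesystem) always comes back empty: loadWparName reads the WPAR_NAME resource out of HACMPresource, finds none, and returns an empty string, so WPAR_ROOT stays '' and each mount happens in the global environment. A sketch of that decision, reusing the clodmget query shown in the trace; the lswpar mapping in the else branch is an assumption for illustration, not something this trace exercises:

    # Sketch: resolve the WPAR root (if any) for a resource group.
    rgName=epprd_rg
    wparName=$(clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource)
    if [[ -z $wparName ]]; then
        WPAR_ROOT=''        # no WPAR configured: mount in the global environment
    else
        # hypothetical: map the WPAR name to its base directory
        WPAR_ROOT=$(lswpar -q -a directory $wparName)
    fi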
+epprd_rg:cl_activate_fs(1.052):/oracle/EPP[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.053):/oracle/EPP[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.5 \n\t lvname = epplv \n\t label = /oracle/EPP \n\t machine id = 44AF14B00 \n\t number lps = 60 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(1.054):/oracle/EPP[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.054):/oracle/EPP[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.056):/oracle/EPP[fs_mount:115] clodmget -q 'name = epplv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:115] CuAt_label=/oracle/EPP +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP from /etc/filesystems +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:119] : should match /oracle/EPP from CuAt ODM and /oracle/EPP from the LVCB +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:123] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:128] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.079):/oracle/EPP[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.079):/oracle/EPP[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.079):/oracle/EPP[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP' +epprd_rg:cl_activate_fs(1.079):/oracle/EPP[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.080):/oracle/EPP[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.104):/oracle/EPP[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.107):/oracle/EPP[amlog_trace:319] DATE=2023-01-28T18:06:59.139998 +epprd_rg:cl_activate_fs(1.107):/oracle/EPP[amlog_trace:320] echo '|2023-01-28T18:06:59.139998|INFO: Activating Filesystem|/oracle/EPP' +epprd_rg:cl_activate_fs(1.107):/oracle/EPP[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.107):/oracle/EPP[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.109):/oracle/EPP[fs_mount:162] : Try to mount filesystem /oracle/EPP at Jan 28 18:06:59.000 +epprd_rg:cl_activate_fs(1.109):/oracle/EPP[fs_mount:163] mount /oracle/EPP +epprd_rg:cl_activate_fs(1.121):/oracle/EPP[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.121):/oracle/EPP[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.121):/oracle/EPP[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.121):/oracle/EPP[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP' +epprd_rg:cl_activate_fs(1.121):/oracle/EPP[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.122):/oracle/EPP[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.146):/oracle/EPP[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.148):/oracle/EPP[amlog_trace:319] 
DATE=2023-01-28T18:06:59.181796 +epprd_rg:cl_activate_fs(1.148):/oracle/EPP[amlog_trace:320] echo '|2023-01-28T18:06:59.181796|INFO: Activating Filesystems completed|/oracle/EPP' +epprd_rg:cl_activate_fs(1.148):/oracle/EPP[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.149):/oracle/EPP[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.149):/oracle/EPP[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.149):/oracle/EPP[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.149):/oracle/EPP[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.149):/oracle/EPP[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.149):/oracle/EPP[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.149):/oracle/EPP[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.149):/oracle/EPP[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.149):/oracle/EPP[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.149):/oracle/EPP[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.150):/oracle/EPP[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.150):/oracle/EPP[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.152):/oracle/EPP[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.152):/oracle/EPP[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(1.152):/oracle/EPP[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.152):/oracle/EPP[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.152):/oracle/EPP[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.152):/oracle/EPP[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.152):/oracle/EPP[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.152):/oracle/EPP[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.034):/oracle/EPP[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.5 \n\t lvname = epplv \n\t label = /oracle/EPP \n\t machine id = 44AF14B00 \n\t number lps = 60 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.153):/oracle/EPP[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[activate_fs_process_group:540] fs_mount /oracle/EPP/mirrlogA fsck epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:69] FS=/oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:71] 
TMP_FILENAME=epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.153):/oracle/EPP/mirrlogA[fs_mount:86] lsfs -c /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.154):/oracle/EPP/mirrlogA[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.159):/oracle/EPP/mirrlogA[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.154):/oracle/EPP/mirrlogA[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogA:/dev/mirrlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.159):/oracle/EPP/mirrlogA[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.159):/oracle/EPP/mirrlogA[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.160):/oracle/EPP/mirrlogA[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.154):/oracle/EPP/mirrlogA[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogA:/dev/mirrlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.161):/oracle/EPP/mirrlogA[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.161):/oracle/EPP/mirrlogA[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.162):/oracle/EPP/mirrlogA[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:100] LV_name=mirrlogAlv +epprd_rg:cl_activate_fs(1.163):/oracle/EPP/mirrlogA[fs_mount:101] getlvcb -T -A mirrlogAlv +epprd_rg:cl_activate_fs(1.164):/oracle/EPP/mirrlogA[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.181):/oracle/EPP/mirrlogA[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.164):/oracle/EPP/mirrlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.14 \n\t lvname = mirrlogAlv \n\t label = /oracle/EPP/mirrlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = 
Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(1.181):/oracle/EPP/mirrlogA[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.181):/oracle/EPP/mirrlogA[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.182):/oracle/EPP/mirrlogA[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.164):/oracle/EPP/mirrlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.14 \n\t lvname = mirrlogAlv \n\t label = /oracle/EPP/mirrlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(1.183):/oracle/EPP/mirrlogA[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.184):/oracle/EPP/mirrlogA[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.185):/oracle/EPP/mirrlogA[fs_mount:115] clodmget -q 'name = mirrlogAlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.189):/oracle/EPP/mirrlogA[fs_mount:115] CuAt_label=/oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.189):/oracle/EPP/mirrlogA[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/mirrlogA from /etc/filesystems +epprd_rg:cl_activate_fs(1.189):/oracle/EPP/mirrlogA[fs_mount:119] : should match /oracle/EPP/mirrlogA from CuAt ODM and /oracle/EPP/mirrlogA from the LVCB +epprd_rg:cl_activate_fs(1.189):/oracle/EPP/mirrlogA[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.189):/oracle/EPP/mirrlogA[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.189):/oracle/EPP/mirrlogA[fs_mount:123] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:cl_activate_fs(1.189):/oracle/EPP/mirrlogA[fs_mount:128] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:cl_activate_fs(1.189):/oracle/EPP/mirrlogA[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.189):/oracle/EPP/mirrlogA[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.189):/oracle/EPP/mirrlogA[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.209):/oracle/EPP/mirrlogA[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.209):/oracle/EPP/mirrlogA[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.209):/oracle/EPP/mirrlogA[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_activate_fs(1.209):/oracle/EPP/mirrlogA[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.209):/oracle/EPP/mirrlogA[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.233):/oracle/EPP/mirrlogA[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.236):/oracle/EPP/mirrlogA[amlog_trace:319] DATE=2023-01-28T18:06:59.269556 +epprd_rg:cl_activate_fs(1.236):/oracle/EPP/mirrlogA[amlog_trace:320] echo '|2023-01-28T18:06:59.269556|INFO: Activating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_activate_fs(1.236):/oracle/EPP/mirrlogA[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.236):/oracle/EPP/mirrlogA[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.239):/oracle/EPP/mirrlogA[fs_mount:162] : Try to mount filesystem /oracle/EPP/mirrlogA at Jan 28 18:06:59.000 +epprd_rg:cl_activate_fs(1.239):/oracle/EPP/mirrlogA[fs_mount:163] mount /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.251):/oracle/EPP/mirrlogA[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.251):/oracle/EPP/mirrlogA[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.251):/oracle/EPP/mirrlogA[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.251):/oracle/EPP/mirrlogA[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/mirrlogA' +epprd_rg:cl_activate_fs(1.251):/oracle/EPP/mirrlogA[amlog_trace:318] clcycle clavailability.log 
+epprd_rg:cl_activate_fs(1.252):/oracle/EPP/mirrlogA[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.275):/oracle/EPP/mirrlogA[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.278):/oracle/EPP/mirrlogA[amlog_trace:319] DATE=2023-01-28T18:06:59.311480 +epprd_rg:cl_activate_fs(1.278):/oracle/EPP/mirrlogA[amlog_trace:320] echo '|2023-01-28T18:06:59.311480|INFO: Activating Filesystems completed|/oracle/EPP/mirrlogA' +epprd_rg:cl_activate_fs(1.278):/oracle/EPP/mirrlogA[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.278):/oracle/EPP/mirrlogA[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.278):/oracle/EPP/mirrlogA[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.278):/oracle/EPP/mirrlogA[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.278):/oracle/EPP/mirrlogA[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.278):/oracle/EPP/mirrlogA[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.278):/oracle/EPP/mirrlogA[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.278):/oracle/EPP/mirrlogA[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.278):/oracle/EPP/mirrlogA[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.278):/oracle/EPP/mirrlogA[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.278):/oracle/EPP/mirrlogA[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.279):/oracle/EPP/mirrlogA[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.280):/oracle/EPP/mirrlogA[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogA[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogA[fs_mount:236] IFS=. 
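[Editor's note] The VRMF handling traced at fs_mount:230-241 is worth calling out: R, M and F are declared with typeset -Z2/-Z3 so each field is zero-padded to a fixed width, which lets the script compare whole fileset versions as plain integers (here bos.rte.filesystem 7.2.5.102 becomes 702005102, which clears the 701001000 threshold for mountguard support). A sketch of the same idiom, assuming ksh93 semantics as in the trace:

    # Sketch: numeric comparison of an AIX fileset VRMF via fixed-width padding.
    typeset -li V VRMF          # integers
    typeset -Z2 R               # zero-pad release to 2 digits
    typeset -Z3 M F             # zero-pad modification/fix to 3 digits
    lslpp -lcqOr bos.rte.filesystem | cut -f3 -d: | IFS=. read V R M F
    VRMF=${V}${R}${M}${F}       # 7.2.5.102 -> 702005102
    if (( V == 7 && VRMF >= 701001000 )); then
        : # AIX level supports JFS2 mountguard
    fi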
+epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogA[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogA[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogA[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogA[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogA[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogA[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.164):/oracle/EPP/mirrlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.14 \n\t lvname = mirrlogAlv \n\t label = /oracle/EPP/mirrlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogA[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogB[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogB[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogB[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogB[activate_fs_process_group:540] fs_mount /oracle/EPP/mirrlogB fsck epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogB[fs_mount:69] FS=/oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogB[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogB[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogB[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogB[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogB[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogB[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogB[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.282):/oracle/EPP/mirrlogB[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.283):/oracle/EPP/mirrlogB[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.283):/oracle/EPP/mirrlogB[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.283):/oracle/EPP/mirrlogB[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.283):/oracle/EPP/mirrlogB[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.283):/oracle/EPP/mirrlogB[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.283):/oracle/EPP/mirrlogB[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.283):/oracle/EPP/mirrlogB[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.283):/oracle/EPP/mirrlogB[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.283):/oracle/EPP/mirrlogB[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.283):/oracle/EPP/mirrlogB[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.283):/oracle/EPP/mirrlogB[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.283):/oracle/EPP/mirrlogB[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.283):/oracle/EPP/mirrlogB[fs_mount:86] lsfs -c /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.284):/oracle/EPP/mirrlogB[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.288):/oracle/EPP/mirrlogB[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.284):/oracle/EPP/mirrlogB[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogB:/dev/mirrlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.288):/oracle/EPP/mirrlogB[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.288):/oracle/EPP/mirrlogB[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.289):/oracle/EPP/mirrlogB[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.284):/oracle/EPP/mirrlogB[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogB:/dev/mirrlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.290):/oracle/EPP/mirrlogB[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.291):/oracle/EPP/mirrlogB[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.291):/oracle/EPP/mirrlogB[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.292):/oracle/EPP/mirrlogB[fs_mount:100] LV_name=mirrlogBlv +epprd_rg:cl_activate_fs(1.292):/oracle/EPP/mirrlogB[fs_mount:101] getlvcb -T -A mirrlogBlv +epprd_rg:cl_activate_fs(1.293):/oracle/EPP/mirrlogB[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.311):/oracle/EPP/mirrlogB[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.293):/oracle/EPP/mirrlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.15 \n\t lvname = mirrlogBlv \n\t label = /oracle/EPP/mirrlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:50 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.311):/oracle/EPP/mirrlogB[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.311):/oracle/EPP/mirrlogB[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.312):/oracle/EPP/mirrlogB[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.293):/oracle/EPP/mirrlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.15 \n\t lvname = mirrlogBlv \n\t label = /oracle/EPP/mirrlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:50 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.313):/oracle/EPP/mirrlogB[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.314):/oracle/EPP/mirrlogB[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.315):/oracle/EPP/mirrlogB[fs_mount:115] clodmget -q 'name = mirrlogBlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:115] CuAt_label=/oracle/EPP/mirrlogB 
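[Editor's note] On every filesystem so far, the guard test at fs_mount:247 finds mountguard=yes already recorded in the LVCB's fs attribute, so fs_mount returns without touching the volume group; the comment at fs_mount:245 explains why (setting it would change the VG timestamp, so it should be done once, not per event). If the attribute were missing, the natural remedy on a supported AIX level is chfs. A hedged sketch of that recovery path, not taken from this trace:

    # Sketch (hypothetical recovery path): enable JFS2 mountguard when absent.
    FS=/oracle/EPP/mirrlogA
    if [[ $LVCB_info != *mountguard=yes* ]]; then
        # chfs rewrites the fs stanza and the LVCB, so the VG timestamp changes;
        # run this once rather than on every cluster event.
        chfs -a mountguard=yes $FS
    fi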
+epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/mirrlogB from /etc/filesystems +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:119] : should match /oracle/EPP/mirrlogB from CuAt ODM and /oracle/EPP/mirrlogB from the LVCB +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:123] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:128] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.339):/oracle/EPP/mirrlogB[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.339):/oracle/EPP/mirrlogB[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.339):/oracle/EPP/mirrlogB[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_activate_fs(1.339):/oracle/EPP/mirrlogB[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.340):/oracle/EPP/mirrlogB[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.365):/oracle/EPP/mirrlogB[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.367):/oracle/EPP/mirrlogB[amlog_trace:319] 
DATE=2023-01-28T18:06:59.400700 +epprd_rg:cl_activate_fs(1.367):/oracle/EPP/mirrlogB[amlog_trace:320] echo '|2023-01-28T18:06:59.400700|INFO: Activating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_activate_fs(1.367):/oracle/EPP/mirrlogB[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.367):/oracle/EPP/mirrlogB[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.370):/oracle/EPP/mirrlogB[fs_mount:162] : Try to mount filesystem /oracle/EPP/mirrlogB at Jan 28 18:06:59.000 +epprd_rg:cl_activate_fs(1.370):/oracle/EPP/mirrlogB[fs_mount:163] mount /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.381):/oracle/EPP/mirrlogB[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.381):/oracle/EPP/mirrlogB[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.381):/oracle/EPP/mirrlogB[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.381):/oracle/EPP/mirrlogB[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/mirrlogB' +epprd_rg:cl_activate_fs(1.381):/oracle/EPP/mirrlogB[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.382):/oracle/EPP/mirrlogB[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.406):/oracle/EPP/mirrlogB[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.409):/oracle/EPP/mirrlogB[amlog_trace:319] DATE=2023-01-28T18:06:59.442332 +epprd_rg:cl_activate_fs(1.409):/oracle/EPP/mirrlogB[amlog_trace:320] echo '|2023-01-28T18:06:59.442332|INFO: Activating Filesystems completed|/oracle/EPP/mirrlogB' +epprd_rg:cl_activate_fs(1.409):/oracle/EPP/mirrlogB[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.409):/oracle/EPP/mirrlogB[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.409):/oracle/EPP/mirrlogB[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.409):/oracle/EPP/mirrlogB[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.409):/oracle/EPP/mirrlogB[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.409):/oracle/EPP/mirrlogB[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.409):/oracle/EPP/mirrlogB[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.409):/oracle/EPP/mirrlogB[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.409):/oracle/EPP/mirrlogB[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.409):/oracle/EPP/mirrlogB[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.409):/oracle/EPP/mirrlogB[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.410):/oracle/EPP/mirrlogB[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.411):/oracle/EPP/mirrlogB[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/mirrlogB[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/mirrlogB[fs_mount:236] IFS=. 
+epprd_rg:cl_activate_fs(1.413):/oracle/EPP/mirrlogB[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/mirrlogB[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/mirrlogB[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/mirrlogB[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/mirrlogB[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/mirrlogB[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.293):/oracle/EPP/mirrlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.15 \n\t lvname = mirrlogBlv \n\t label = /oracle/EPP/mirrlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:50 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/mirrlogB[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[activate_fs_process_group:540] fs_mount /oracle/EPP/oraarch fsck epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:69] FS=/oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/oraarch[fs_mount:86] lsfs -c /oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.414):/oracle/EPP/oraarch[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.419):/oracle/EPP/oraarch[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.415):/oracle/EPP/oraarch[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/oraarch:/dev/oraarchlv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.419):/oracle/EPP/oraarch[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.419):/oracle/EPP/oraarch[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.420):/oracle/EPP/oraarch[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.415):/oracle/EPP/oraarch[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/oraarch:/dev/oraarchlv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.421):/oracle/EPP/oraarch[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.422):/oracle/EPP/oraarch[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.422):/oracle/EPP/oraarch[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:100] LV_name=oraarchlv +epprd_rg:cl_activate_fs(1.423):/oracle/EPP/oraarch[fs_mount:101] getlvcb -T -A oraarchlv +epprd_rg:cl_activate_fs(1.424):/oracle/EPP/oraarch[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.442):/oracle/EPP/oraarch[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.424):/oracle/EPP/oraarch[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.6 \n\t lvname = oraarchlv \n\t label = /oracle/EPP/oraarch \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.442):/oracle/EPP/oraarch[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.442):/oracle/EPP/oraarch[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.443):/oracle/EPP/oraarch[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.424):/oracle/EPP/oraarch[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.6 \n\t lvname = oraarchlv \n\t label = /oracle/EPP/oraarch \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.444):/oracle/EPP/oraarch[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.445):/oracle/EPP/oraarch[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.446):/oracle/EPP/oraarch[fs_mount:115] clodmget -q 'name = oraarchlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.449):/oracle/EPP/oraarch[fs_mount:115] CuAt_label=/oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:118] : 
At this point, if things are working correctly, /oracle/EPP/oraarch from /etc/filesystems +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:119] : should match /oracle/EPP/oraarch from CuAt ODM and /oracle/EPP/oraarch from the LVCB +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:123] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:128] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.469):/oracle/EPP/oraarch[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.470):/oracle/EPP/oraarch[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.470):/oracle/EPP/oraarch[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_activate_fs(1.470):/oracle/EPP/oraarch[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.470):/oracle/EPP/oraarch[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.494):/oracle/EPP/oraarch[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.497):/oracle/EPP/oraarch[amlog_trace:319] DATE=2023-01-28T18:06:59.530255 
+epprd_rg:cl_activate_fs(1.497):/oracle/EPP/oraarch[amlog_trace:320] echo '|2023-01-28T18:06:59.530255|INFO: Activating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_activate_fs(1.497):/oracle/EPP/oraarch[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.497):/oracle/EPP/oraarch[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.500):/oracle/EPP/oraarch[fs_mount:162] : Try to mount filesystem /oracle/EPP/oraarch at Jan 28 18:06:59.000 +epprd_rg:cl_activate_fs(1.500):/oracle/EPP/oraarch[fs_mount:163] mount /oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.511):/oracle/EPP/oraarch[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.511):/oracle/EPP/oraarch[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.511):/oracle/EPP/oraarch[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.511):/oracle/EPP/oraarch[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/oraarch' +epprd_rg:cl_activate_fs(1.511):/oracle/EPP/oraarch[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.512):/oracle/EPP/oraarch[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.536):/oracle/EPP/oraarch[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.539):/oracle/EPP/oraarch[amlog_trace:319] DATE=2023-01-28T18:06:59.572143 +epprd_rg:cl_activate_fs(1.539):/oracle/EPP/oraarch[amlog_trace:320] echo '|2023-01-28T18:06:59.572143|INFO: Activating Filesystems completed|/oracle/EPP/oraarch' +epprd_rg:cl_activate_fs(1.539):/oracle/EPP/oraarch[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.539):/oracle/EPP/oraarch[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.539):/oracle/EPP/oraarch[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.539):/oracle/EPP/oraarch[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.539):/oracle/EPP/oraarch[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.539):/oracle/EPP/oraarch[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.539):/oracle/EPP/oraarch[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.539):/oracle/EPP/oraarch[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.539):/oracle/EPP/oraarch[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.539):/oracle/EPP/oraarch[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.539):/oracle/EPP/oraarch[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.540):/oracle/EPP/oraarch[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.541):/oracle/EPP/oraarch[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/oraarch[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/oraarch[fs_mount:236] IFS=. 
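[Editor's note] Every mount in this trace succeeds, so the (( 0 == 1 )) test at fs_mount:209 never fires. The TOOL=fsck argument passed into fs_mount suggests the failure branch runs a filesystem check and retries, but that branch is not exercised here; the following is only a plausible sketch of such recovery, with the retry logic assumed:

    # Sketch (assumed failure path; not exercised in this trace):
    FS=/oracle/EPP/oraarch
    if ! mount $FS; then
        fsck -y $FS        # repair without prompting
        mount $FS || print -u2 "ERROR: cannot recover $FS"
    fi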
+epprd_rg:cl_activate_fs(1.543):/oracle/EPP/oraarch[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/oraarch[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/oraarch[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/oraarch[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/oraarch[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/oraarch[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.424):/oracle/EPP/oraarch[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.6 \n\t lvname = oraarchlv \n\t label = /oracle/EPP/oraarch \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/oraarch[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/origlogA +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[activate_fs_process_group:540] fs_mount /oracle/EPP/origlogA fsck epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:69] FS=/oracle/EPP/origlogA +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/origlogA +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/origlogA[fs_mount:86] lsfs -c /oracle/EPP/origlogA +epprd_rg:cl_activate_fs(1.544):/oracle/EPP/origlogA[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.549):/oracle/EPP/origlogA[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.545):/oracle/EPP/origlogA[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogA:/dev/origlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.549):/oracle/EPP/origlogA[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.549):/oracle/EPP/origlogA[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.550):/oracle/EPP/origlogA[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.545):/oracle/EPP/origlogA[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogA:/dev/origlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.551):/oracle/EPP/origlogA[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.552):/oracle/EPP/origlogA[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.553):/oracle/EPP/origlogA[fs_mount:100] LV_name=origlogAlv +epprd_rg:cl_activate_fs(1.553):/oracle/EPP/origlogA[fs_mount:101] getlvcb -T -A origlogAlv +epprd_rg:cl_activate_fs(1.554):/oracle/EPP/origlogA[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.571):/oracle/EPP/origlogA[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.554):/oracle/EPP/origlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.12 \n\t lvname = origlogAlv \n\t label = /oracle/EPP/origlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:48 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.571):/oracle/EPP/origlogA[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.571):/oracle/EPP/origlogA[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.572):/oracle/EPP/origlogA[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.554):/oracle/EPP/origlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.12 \n\t lvname = origlogAlv \n\t label = /oracle/EPP/origlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:48 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.573):/oracle/EPP/origlogA[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.574):/oracle/EPP/origlogA[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.575):/oracle/EPP/origlogA[fs_mount:115] clodmget -q 'name = origlogAlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.579):/oracle/EPP/origlogA[fs_mount:115] CuAt_label=/oracle/EPP/origlogA 
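
The sequence just above is fs_mount's three-way sanity check before mounting: the mount point recorded in /etc/filesystems (lsfs -c), the label stored in the on-disk LVCB (getlvcb -T -A), and the label attribute held in the CuAt ODM must all agree. A minimal ksh sketch of that check, with names taken from the trace; the final comparison condenses the separate tests at fs_mount:123 and fs_mount:128, and the LV_name derivation is inferred:

    # Three-way label check for one mount point (illustrative sketch,
    # not the shipped cl_activate_fs source).
    FS=/oracle/EPP/origlogA

    # 1. /etc/filesystems: last colon-delimited record from lsfs -c
    lsfs -c $FS 2>&1 | tail -1 | IFS=: read skip LV_dev_name vfs_type rest
    LV_name=${LV_dev_name##*/}          # /dev/origlogAlv -> origlogAlv

    # 2. On-disk LVCB: the "label =" line from getlvcb
    getlvcb -T -A $LV_name 2>&1 | grep -w 'label =' | read skip skip LVCB_label

    # 3. CuAt ODM: the label attribute recorded for the logical volume
    CuAt_label=$(clodmget -q "name = $LV_name and attribute = label" -f value -n CuAt)

    # All three should name the same mount point; a mismatch is only
    # flagged here, because clvaryonvg already made best efforts at repair.
    [[ $LVCB_label != $FS || $CuAt_label != $FS ]] && print -- "label mismatch for $FS"
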
+epprd_rg:cl_activate_fs(1.579):/oracle/EPP/origlogA[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/origlogA from /etc/filesystems +epprd_rg:cl_activate_fs(1.579):/oracle/EPP/origlogA[fs_mount:119] : should match /oracle/EPP/origlogA from CuAt ODM and /oracle/EPP/origlogA from the LVCB +epprd_rg:cl_activate_fs(1.579):/oracle/EPP/origlogA[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.579):/oracle/EPP/origlogA[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.579):/oracle/EPP/origlogA[fs_mount:123] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:cl_activate_fs(1.579):/oracle/EPP/origlogA[fs_mount:128] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:cl_activate_fs(1.579):/oracle/EPP/origlogA[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.579):/oracle/EPP/origlogA[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.579):/oracle/EPP/origlogA[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.599):/oracle/EPP/origlogA[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.599):/oracle/EPP/origlogA[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.599):/oracle/EPP/origlogA[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_activate_fs(1.599):/oracle/EPP/origlogA[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.600):/oracle/EPP/origlogA[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.624):/oracle/EPP/origlogA[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.626):/oracle/EPP/origlogA[amlog_trace:319] 
DATE=2023-01-28T18:06:59.659734 +epprd_rg:cl_activate_fs(1.626):/oracle/EPP/origlogA[amlog_trace:320] echo '|2023-01-28T18:06:59.659734|INFO: Activating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_activate_fs(1.626):/oracle/EPP/origlogA[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.626):/oracle/EPP/origlogA[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.629):/oracle/EPP/origlogA[fs_mount:162] : Try to mount filesystem /oracle/EPP/origlogA at Jan 28 18:06:59.000 +epprd_rg:cl_activate_fs(1.629):/oracle/EPP/origlogA[fs_mount:163] mount /oracle/EPP/origlogA +epprd_rg:cl_activate_fs(1.640):/oracle/EPP/origlogA[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.640):/oracle/EPP/origlogA[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.640):/oracle/EPP/origlogA[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.640):/oracle/EPP/origlogA[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/origlogA' +epprd_rg:cl_activate_fs(1.640):/oracle/EPP/origlogA[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.641):/oracle/EPP/origlogA[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.665):/oracle/EPP/origlogA[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.668):/oracle/EPP/origlogA[amlog_trace:319] DATE=2023-01-28T18:06:59.701096 +epprd_rg:cl_activate_fs(1.668):/oracle/EPP/origlogA[amlog_trace:320] echo '|2023-01-28T18:06:59.701096|INFO: Activating Filesystems completed|/oracle/EPP/origlogA' +epprd_rg:cl_activate_fs(1.668):/oracle/EPP/origlogA[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.668):/oracle/EPP/origlogA[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.668):/oracle/EPP/origlogA[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.668):/oracle/EPP/origlogA[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.668):/oracle/EPP/origlogA[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.668):/oracle/EPP/origlogA[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.668):/oracle/EPP/origlogA[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.668):/oracle/EPP/origlogA[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.668):/oracle/EPP/origlogA[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.668):/oracle/EPP/origlogA[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.668):/oracle/EPP/origlogA[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.669):/oracle/EPP/origlogA[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.670):/oracle/EPP/origlogA[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogA[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogA[fs_mount:236] IFS=. 
+epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogA[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogA[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogA[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogA[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogA[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogA[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.554):/oracle/EPP/origlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.12 \n\t lvname = origlogAlv \n\t label = /oracle/EPP/origlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:48 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogA[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/origlogB +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[activate_fs_process_group:540] fs_mount /oracle/EPP/origlogB fsck epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:69] FS=/oracle/EPP/origlogB +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/origlogB +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.672):/oracle/EPP/origlogB[fs_mount:86] lsfs -c /oracle/EPP/origlogB +epprd_rg:cl_activate_fs(1.673):/oracle/EPP/origlogB[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.678):/oracle/EPP/origlogB[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.673):/oracle/EPP/origlogB[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogB:/dev/origlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.678):/oracle/EPP/origlogB[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.678):/oracle/EPP/origlogB[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.679):/oracle/EPP/origlogB[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.673):/oracle/EPP/origlogB[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogB:/dev/origlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.680):/oracle/EPP/origlogB[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogB[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.681):/oracle/EPP/origlogB[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.682):/oracle/EPP/origlogB[fs_mount:100] LV_name=origlogBlv +epprd_rg:cl_activate_fs(1.682):/oracle/EPP/origlogB[fs_mount:101] getlvcb -T -A origlogBlv +epprd_rg:cl_activate_fs(1.683):/oracle/EPP/origlogB[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.701):/oracle/EPP/origlogB[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.683):/oracle/EPP/origlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.13 \n\t lvname = origlogBlv \n\t label = /oracle/EPP/origlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.701):/oracle/EPP/origlogB[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.701):/oracle/EPP/origlogB[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.703):/oracle/EPP/origlogB[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.683):/oracle/EPP/origlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.13 \n\t lvname = origlogBlv \n\t label = /oracle/EPP/origlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.704):/oracle/EPP/origlogB[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.704):/oracle/EPP/origlogB[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.706):/oracle/EPP/origlogB[fs_mount:115] clodmget -q 'name = origlogBlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.709):/oracle/EPP/origlogB[fs_mount:115] CuAt_label=/oracle/EPP/origlogB 
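
The VRMF block that precedes each mountguard decision (fs_mount:230-241, seen for origlogA just above) leans on ksh zero-fill attributes: typeset -Z2/-Z3 pads R, M and F with leading zeros, so bos.rte.filesystem level 7.2.5.102 becomes the fixed-width integer 702005102 and a plain arithmetic compare decides support. The thresholds 601007000 and 701001000 then read naturally as minimum levels 6.1.7.0 and 7.1.1.0. A sketch under those assumptions; fs_mount:237 shows only the result, so the concatenation step is inferred:

    # Normalize a V.R.M.F fileset level into one comparable integer.
    typeset -li V R M F
    typeset -Z2 R           # two digits, zero-filled
    typeset -Z3 M F         # three digits each, zero-filled
    typeset -li VRMF=0

    lslpp -lcqOr bos.rte.filesystem | cut -f3 -d: | IFS=. read V R M F
    VRMF=$V$R$M$F           # e.g. 7 02 005 102 -> 702005102 (inferred step)

    if (( V == 6 && VRMF >= 601007000 )) || (( V == 7 && VRMF >= 701001000 ))
    then
        : mountguard is available at this AIX level
    fi
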
+epprd_rg:cl_activate_fs(1.709):/oracle/EPP/origlogB[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/origlogB from /etc/filesystems +epprd_rg:cl_activate_fs(1.709):/oracle/EPP/origlogB[fs_mount:119] : should match /oracle/EPP/origlogB from CuAt ODM and /oracle/EPP/origlogB from the LVCB +epprd_rg:cl_activate_fs(1.709):/oracle/EPP/origlogB[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.709):/oracle/EPP/origlogB[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.709):/oracle/EPP/origlogB[fs_mount:123] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:cl_activate_fs(1.709):/oracle/EPP/origlogB[fs_mount:128] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:cl_activate_fs(1.709):/oracle/EPP/origlogB[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.709):/oracle/EPP/origlogB[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.709):/oracle/EPP/origlogB[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.729):/oracle/EPP/origlogB[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.729):/oracle/EPP/origlogB[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.729):/oracle/EPP/origlogB[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_activate_fs(1.729):/oracle/EPP/origlogB[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.730):/oracle/EPP/origlogB[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.754):/oracle/EPP/origlogB[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.756):/oracle/EPP/origlogB[amlog_trace:319] 
DATE=2023-01-28T18:06:59.789858 +epprd_rg:cl_activate_fs(1.757):/oracle/EPP/origlogB[amlog_trace:320] echo '|2023-01-28T18:06:59.789858|INFO: Activating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_activate_fs(1.757):/oracle/EPP/origlogB[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.757):/oracle/EPP/origlogB[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.759):/oracle/EPP/origlogB[fs_mount:162] : Try to mount filesystem /oracle/EPP/origlogB at Jan 28 18:06:59.000 +epprd_rg:cl_activate_fs(1.759):/oracle/EPP/origlogB[fs_mount:163] mount /oracle/EPP/origlogB +epprd_rg:cl_activate_fs(1.771):/oracle/EPP/origlogB[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.771):/oracle/EPP/origlogB[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.771):/oracle/EPP/origlogB[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.771):/oracle/EPP/origlogB[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/origlogB' +epprd_rg:cl_activate_fs(1.771):/oracle/EPP/origlogB[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.772):/oracle/EPP/origlogB[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.796):/oracle/EPP/origlogB[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.799):/oracle/EPP/origlogB[amlog_trace:319] DATE=2023-01-28T18:06:59.831885 +epprd_rg:cl_activate_fs(1.799):/oracle/EPP/origlogB[amlog_trace:320] echo '|2023-01-28T18:06:59.831885|INFO: Activating Filesystems completed|/oracle/EPP/origlogB' +epprd_rg:cl_activate_fs(1.799):/oracle/EPP/origlogB[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.799):/oracle/EPP/origlogB[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.799):/oracle/EPP/origlogB[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.799):/oracle/EPP/origlogB[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.799):/oracle/EPP/origlogB[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.799):/oracle/EPP/origlogB[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.799):/oracle/EPP/origlogB[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.799):/oracle/EPP/origlogB[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.799):/oracle/EPP/origlogB[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.799):/oracle/EPP/origlogB[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.799):/oracle/EPP/origlogB[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.800):/oracle/EPP/origlogB[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.801):/oracle/EPP/origlogB[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.802):/oracle/EPP/origlogB[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.802):/oracle/EPP/origlogB[fs_mount:236] IFS=. 
+epprd_rg:cl_activate_fs(1.803):/oracle/EPP/origlogB[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/origlogB[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/origlogB[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/origlogB[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/origlogB[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/origlogB[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.683):/oracle/EPP/origlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.13 \n\t lvname = origlogBlv \n\t label = /oracle/EPP/origlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/origlogB[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[activate_fs_process_group:540] fs_mount /oracle/EPP/sapdata1 fsck epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:69] FS=/oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.803):/oracle/EPP/sapdata1[fs_mount:86] lsfs -c /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(1.804):/oracle/EPP/sapdata1[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.809):/oracle/EPP/sapdata1[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.804):/oracle/EPP/sapdata1[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata1:/dev/sapdata1lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.809):/oracle/EPP/sapdata1[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.809):/oracle/EPP/sapdata1[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.810):/oracle/EPP/sapdata1[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.804):/oracle/EPP/sapdata1[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata1:/dev/sapdata1lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.811):/oracle/EPP/sapdata1[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.811):/oracle/EPP/sapdata1[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.812):/oracle/EPP/sapdata1[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.813):/oracle/EPP/sapdata1[fs_mount:100] LV_name=sapdata1lv +epprd_rg:cl_activate_fs(1.813):/oracle/EPP/sapdata1[fs_mount:101] getlvcb -T -A sapdata1lv +epprd_rg:cl_activate_fs(1.814):/oracle/EPP/sapdata1[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.831):/oracle/EPP/sapdata1[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.814):/oracle/EPP/sapdata1[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.7 \n\t lvname = sapdata1lv \n\t label = /oracle/EPP/sapdata1 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:44 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(1.831):/oracle/EPP/sapdata1[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.831):/oracle/EPP/sapdata1[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.833):/oracle/EPP/sapdata1[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.814):/oracle/EPP/sapdata1[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.7 \n\t lvname = sapdata1lv \n\t label = /oracle/EPP/sapdata1 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:44 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(1.833):/oracle/EPP/sapdata1[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.834):/oracle/EPP/sapdata1[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.836):/oracle/EPP/sapdata1[fs_mount:115] clodmget -q 'name = sapdata1lv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.839):/oracle/EPP/sapdata1[fs_mount:115] CuAt_label=/oracle/EPP/sapdata1 
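
Every mount in this trace is bracketed by amlog_trace records appended to /var/hacmp/availability/clavailability.log: clcycle rotates the log if needed, cltime supplies a microsecond timestamp, and the message is written as one pipe-delimited line. The shape below is inferred from the trace rather than copied from source; the role of the first, always-empty argument is not visible here:

    # Availability-log helper as it behaves at amlog_trace:318-320 (sketch).
    amlog_trace()
    {
        # $1 is '' in every call above; its purpose is not shown in this trace.
        clcycle clavailability.log > /dev/null 2>&1      # rotate if needed
        typeset DATE=$(cltime)                           # e.g. 2023-01-28T18:07:00.051162
        echo "|$DATE|INFO: $2" >> /var/hacmp/availability/clavailability.log
    }

    amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata1'
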
+epprd_rg:cl_activate_fs(1.839):/oracle/EPP/sapdata1[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/sapdata1 from /etc/filesystems +epprd_rg:cl_activate_fs(1.839):/oracle/EPP/sapdata1[fs_mount:119] : should match /oracle/EPP/sapdata1 from CuAt ODM and /oracle/EPP/sapdata1 from the LVCB +epprd_rg:cl_activate_fs(1.839):/oracle/EPP/sapdata1[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.839):/oracle/EPP/sapdata1[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.839):/oracle/EPP/sapdata1[fs_mount:123] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:cl_activate_fs(1.839):/oracle/EPP/sapdata1[fs_mount:128] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:cl_activate_fs(1.839):/oracle/EPP/sapdata1[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.839):/oracle/EPP/sapdata1[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.839):/oracle/EPP/sapdata1[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.859):/oracle/EPP/sapdata1[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.859):/oracle/EPP/sapdata1[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.859):/oracle/EPP/sapdata1[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_activate_fs(1.859):/oracle/EPP/sapdata1[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.860):/oracle/EPP/sapdata1[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.884):/oracle/EPP/sapdata1[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.887):/oracle/EPP/sapdata1[amlog_trace:319] 
DATE=2023-01-28T18:06:59.920256 +epprd_rg:cl_activate_fs(1.887):/oracle/EPP/sapdata1[amlog_trace:320] echo '|2023-01-28T18:06:59.920256|INFO: Activating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_activate_fs(1.887):/oracle/EPP/sapdata1[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.887):/oracle/EPP/sapdata1[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.890):/oracle/EPP/sapdata1[fs_mount:162] : Try to mount filesystem /oracle/EPP/sapdata1 at Jan 28 18:06:59.000 +epprd_rg:cl_activate_fs(1.890):/oracle/EPP/sapdata1[fs_mount:163] mount /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(1.901):/oracle/EPP/sapdata1[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.901):/oracle/EPP/sapdata1[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.901):/oracle/EPP/sapdata1[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.901):/oracle/EPP/sapdata1[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/sapdata1' +epprd_rg:cl_activate_fs(1.901):/oracle/EPP/sapdata1[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.902):/oracle/EPP/sapdata1[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.926):/oracle/EPP/sapdata1[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.929):/oracle/EPP/sapdata1[amlog_trace:319] DATE=2023-01-28T18:06:59.962205 +epprd_rg:cl_activate_fs(1.929):/oracle/EPP/sapdata1[amlog_trace:320] echo '|2023-01-28T18:06:59.962205|INFO: Activating Filesystems completed|/oracle/EPP/sapdata1' +epprd_rg:cl_activate_fs(1.929):/oracle/EPP/sapdata1[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.929):/oracle/EPP/sapdata1[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.929):/oracle/EPP/sapdata1[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.929):/oracle/EPP/sapdata1[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.929):/oracle/EPP/sapdata1[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.929):/oracle/EPP/sapdata1[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.929):/oracle/EPP/sapdata1[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.929):/oracle/EPP/sapdata1[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.929):/oracle/EPP/sapdata1[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.929):/oracle/EPP/sapdata1[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.929):/oracle/EPP/sapdata1[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.930):/oracle/EPP/sapdata1[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.931):/oracle/EPP/sapdata1[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata1[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata1[fs_mount:236] IFS=. 
+epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata1[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata1[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata1[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata1[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata1[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata1[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.814):/oracle/EPP/sapdata1[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.7 \n\t lvname = sapdata1lv \n\t label = /oracle/EPP/sapdata1 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:44 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata1[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[activate_fs_process_group:540] fs_mount /oracle/EPP/sapdata2 fsck epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:69] FS=/oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.933):/oracle/EPP/sapdata2[fs_mount:86] lsfs -c /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(1.934):/oracle/EPP/sapdata2[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/sapdata2[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.935):/oracle/EPP/sapdata2[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata2:/dev/sapdata2lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/sapdata2[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.939):/oracle/EPP/sapdata2[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.940):/oracle/EPP/sapdata2[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.935):/oracle/EPP/sapdata2[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata2:/dev/sapdata2lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.941):/oracle/EPP/sapdata2[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.942):/oracle/EPP/sapdata2[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.943):/oracle/EPP/sapdata2[fs_mount:100] LV_name=sapdata2lv +epprd_rg:cl_activate_fs(1.943):/oracle/EPP/sapdata2[fs_mount:101] getlvcb -T -A sapdata2lv +epprd_rg:cl_activate_fs(1.944):/oracle/EPP/sapdata2[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.962):/oracle/EPP/sapdata2[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.944):/oracle/EPP/sapdata2[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.8 \n\t lvname = sapdata2lv \n\t label = /oracle/EPP/sapdata2 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:45 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(1.963):/oracle/EPP/sapdata2[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.963):/oracle/EPP/sapdata2[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.964):/oracle/EPP/sapdata2[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.944):/oracle/EPP/sapdata2[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.8 \n\t lvname = sapdata2lv \n\t label = /oracle/EPP/sapdata2 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:45 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(1.965):/oracle/EPP/sapdata2[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.965):/oracle/EPP/sapdata2[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.967):/oracle/EPP/sapdata2[fs_mount:115] clodmget -q 'name = sapdata2lv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.970):/oracle/EPP/sapdata2[fs_mount:115] CuAt_label=/oracle/EPP/sapdata2 
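
The clwparroot digression repeated before every mount is a WPAR lookup: loadWparName queries the HACMPresource ODM class for a WPAR_NAME, finds none, and prints nothing, so the caller's WPAR_ROOT stays empty and the filesystem is mounted in the global environment rather than inside a workload partition. A sketch of that path, with the surrounding conditional structure inferred from the trace:

    # Resolve the WPAR (if any) that a resource group should run in (sketch).
    loadWparName()
    {
        typeset rgName=$1
        typeset wparName
        wparName=$(clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource)
        [[ -z $wparName ]] && return 0      # print nothing: RG is not WPAR-enabled
        print -- $wparName
    }

    wparName=$(loadWparName epprd_rg)
    [[ -z $wparName ]] && exit 0            # fs_mount then sees WPAR_ROOT='' and
                                            # mounts in the global environment
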
+epprd_rg:cl_activate_fs(1.970):/oracle/EPP/sapdata2[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/sapdata2 from /etc/filesystems +epprd_rg:cl_activate_fs(1.970):/oracle/EPP/sapdata2[fs_mount:119] : should match /oracle/EPP/sapdata2 from CuAt ODM and /oracle/EPP/sapdata2 from the LVCB +epprd_rg:cl_activate_fs(1.970):/oracle/EPP/sapdata2[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.970):/oracle/EPP/sapdata2[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.970):/oracle/EPP/sapdata2[fs_mount:123] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:cl_activate_fs(1.970):/oracle/EPP/sapdata2[fs_mount:128] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:cl_activate_fs(1.970):/oracle/EPP/sapdata2[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.970):/oracle/EPP/sapdata2[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.970):/oracle/EPP/sapdata2[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.990):/oracle/EPP/sapdata2[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.990):/oracle/EPP/sapdata2[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.990):/oracle/EPP/sapdata2[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_activate_fs(1.990):/oracle/EPP/sapdata2[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.991):/oracle/EPP/sapdata2[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.015):/oracle/EPP/sapdata2[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.018):/oracle/EPP/sapdata2[amlog_trace:319] 
DATE=2023-01-28T18:07:00.051162 +epprd_rg:cl_activate_fs(2.018):/oracle/EPP/sapdata2[amlog_trace:320] echo '|2023-01-28T18:07:00.051162|INFO: Activating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_activate_fs(2.018):/oracle/EPP/sapdata2[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.018):/oracle/EPP/sapdata2[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.021):/oracle/EPP/sapdata2[fs_mount:162] : Try to mount filesystem /oracle/EPP/sapdata2 at Jan 28 18:07:00.000 +epprd_rg:cl_activate_fs(2.021):/oracle/EPP/sapdata2[fs_mount:163] mount /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(2.032):/oracle/EPP/sapdata2[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.032):/oracle/EPP/sapdata2[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.032):/oracle/EPP/sapdata2[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(2.032):/oracle/EPP/sapdata2[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/sapdata2' +epprd_rg:cl_activate_fs(2.032):/oracle/EPP/sapdata2[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.033):/oracle/EPP/sapdata2[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.057):/oracle/EPP/sapdata2[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.060):/oracle/EPP/sapdata2[amlog_trace:319] DATE=2023-01-28T18:07:00.093071 +epprd_rg:cl_activate_fs(2.060):/oracle/EPP/sapdata2[amlog_trace:320] echo '|2023-01-28T18:07:00.093071|INFO: Activating Filesystems completed|/oracle/EPP/sapdata2' +epprd_rg:cl_activate_fs(2.060):/oracle/EPP/sapdata2[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.060):/oracle/EPP/sapdata2[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.060):/oracle/EPP/sapdata2[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(2.060):/oracle/EPP/sapdata2[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.060):/oracle/EPP/sapdata2[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.060):/oracle/EPP/sapdata2[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.060):/oracle/EPP/sapdata2[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.060):/oracle/EPP/sapdata2[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.060):/oracle/EPP/sapdata2[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.060):/oracle/EPP/sapdata2[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.060):/oracle/EPP/sapdata2[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.061):/oracle/EPP/sapdata2[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.062):/oracle/EPP/sapdata2[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata2[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata2[fs_mount:236] IFS=. 
+epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata2[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata2[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata2[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata2[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata2[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata2[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.944):/oracle/EPP/sapdata2[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.8 \n\t lvname = sapdata2lv \n\t label = /oracle/EPP/sapdata2 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:45 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata2[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[activate_fs_process_group:540] fs_mount /oracle/EPP/sapdata3 fsck epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:69] FS=/oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(2.064):/oracle/EPP/sapdata3[fs_mount:86] lsfs -c /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(2.065):/oracle/EPP/sapdata3[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(2.070):/oracle/EPP/sapdata3[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(2.065):/oracle/EPP/sapdata3[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata3:/dev/sapdata3lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.070):/oracle/EPP/sapdata3[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(2.070):/oracle/EPP/sapdata3[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.071):/oracle/EPP/sapdata3[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(2.065):/oracle/EPP/sapdata3[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata3:/dev/sapdata3lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.072):/oracle/EPP/sapdata3[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata3[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(2.074):/oracle/EPP/sapdata3[fs_mount:100] LV_name=sapdata3lv +epprd_rg:cl_activate_fs(2.074):/oracle/EPP/sapdata3[fs_mount:101] getlvcb -T -A sapdata3lv +epprd_rg:cl_activate_fs(2.075):/oracle/EPP/sapdata3[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.093):/oracle/EPP/sapdata3[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(2.075):/oracle/EPP/sapdata3[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.9 \n\t lvname = sapdata3lv \n\t label = /oracle/EPP/sapdata3 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(2.093):/oracle/EPP/sapdata3[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.093):/oracle/EPP/sapdata3[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.094):/oracle/EPP/sapdata3[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(2.075):/oracle/EPP/sapdata3[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.9 \n\t lvname = sapdata3lv \n\t label = /oracle/EPP/sapdata3 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(2.094):/oracle/EPP/sapdata3[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.095):/oracle/EPP/sapdata3[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(2.097):/oracle/EPP/sapdata3[fs_mount:115] clodmget -q 'name = sapdata3lv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(2.100):/oracle/EPP/sapdata3[fs_mount:115] CuAt_label=/oracle/EPP/sapdata3 
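
The last test in each cycle, the pattern match against *mountguard=yes*, is what keeps the mountguard step idempotent: per the comment at fs_mount:244-245, enabling it changes the VG timestamp, so the script consults the LVCB fs attribute first and skips the change when mountguard=yes is already recorded, as it is for every filesystem in this trace. The enabling command itself never runs here, so the chfs call below is an assumption about the suppressed branch:

    # Engage JFS2 mountguard only once per filesystem (sketch).
    if [[ $LVCB_info != *mountguard=yes* ]]
    then
        chfs -a mountguard=yes $FS      # assumed: rewrites the LVCB, so the
                                        # pattern match prevents repeat runs
    fi
    return 0

Each cycle is also driven strictly in series: activate_fs_process_group:528 tests for a parallel recovery method, finds sequential, and calls fs_mount in the foreground (line 538), which is why the filesystems in this trace mount one after another.
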
+epprd_rg:cl_activate_fs(2.100):/oracle/EPP/sapdata3[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/sapdata3 from /etc/filesystems +epprd_rg:cl_activate_fs(2.100):/oracle/EPP/sapdata3[fs_mount:119] : should match /oracle/EPP/sapdata3 from CuAt ODM and /oracle/EPP/sapdata3 from the LVCB +epprd_rg:cl_activate_fs(2.100):/oracle/EPP/sapdata3[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.100):/oracle/EPP/sapdata3[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(2.100):/oracle/EPP/sapdata3[fs_mount:123] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:cl_activate_fs(2.100):/oracle/EPP/sapdata3[fs_mount:128] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:cl_activate_fs(2.100):/oracle/EPP/sapdata3[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.100):/oracle/EPP/sapdata3[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.100):/oracle/EPP/sapdata3[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.120):/oracle/EPP/sapdata3[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.120):/oracle/EPP/sapdata3[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.120):/oracle/EPP/sapdata3[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_activate_fs(2.120):/oracle/EPP/sapdata3[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.121):/oracle/EPP/sapdata3[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.145):/oracle/EPP/sapdata3[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.148):/oracle/EPP/sapdata3[amlog_trace:319] 
DATE=2023-01-28T18:07:00.181196 +epprd_rg:cl_activate_fs(2.148):/oracle/EPP/sapdata3[amlog_trace:320] echo '|2023-01-28T18:07:00.181196|INFO: Activating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_activate_fs(2.148):/oracle/EPP/sapdata3[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.148):/oracle/EPP/sapdata3[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.151):/oracle/EPP/sapdata3[fs_mount:162] : Try to mount filesystem /oracle/EPP/sapdata3 at Jan 28 18:07:00.000 +epprd_rg:cl_activate_fs(2.151):/oracle/EPP/sapdata3[fs_mount:163] mount /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(2.162):/oracle/EPP/sapdata3[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.162):/oracle/EPP/sapdata3[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.162):/oracle/EPP/sapdata3[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(2.162):/oracle/EPP/sapdata3[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/sapdata3' +epprd_rg:cl_activate_fs(2.162):/oracle/EPP/sapdata3[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.163):/oracle/EPP/sapdata3[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.187):/oracle/EPP/sapdata3[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.189):/oracle/EPP/sapdata3[amlog_trace:319] DATE=2023-01-28T18:07:00.222745 +epprd_rg:cl_activate_fs(2.189):/oracle/EPP/sapdata3[amlog_trace:320] echo '|2023-01-28T18:07:00.222745|INFO: Activating Filesystems completed|/oracle/EPP/sapdata3' +epprd_rg:cl_activate_fs(2.189):/oracle/EPP/sapdata3[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.189):/oracle/EPP/sapdata3[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.190):/oracle/EPP/sapdata3[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(2.190):/oracle/EPP/sapdata3[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.190):/oracle/EPP/sapdata3[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.190):/oracle/EPP/sapdata3[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.190):/oracle/EPP/sapdata3[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.190):/oracle/EPP/sapdata3[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.190):/oracle/EPP/sapdata3[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.190):/oracle/EPP/sapdata3[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.190):/oracle/EPP/sapdata3[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.191):/oracle/EPP/sapdata3[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.191):/oracle/EPP/sapdata3[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.193):/oracle/EPP/sapdata3[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.193):/oracle/EPP/sapdata3[fs_mount:236] IFS=. 
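Each amlog_trace call traced above follows one fixed pattern: rotate the availability log with clcycle, stamp the record with cltime, and append a pipe-delimited line. A sketch of the record it produces, assuming clcycle and cltime behave exactly as traced:

    # amlog_trace-style record: |<ISO timestamp>|INFO: <event>|<object>
    clcycle clavailability.log > /dev/null 2>&1     # rotate the log if needed
    DATE=$(cltime)                                  # e.g. 2023-01-28T18:07:00.181196
    echo "|$DATE|INFO: Activating Filesystem|/oracle/EPP/sapdata3" \
        >> /var/hacmp/availability/clavailability.log

Because every mount is bracketed by an 'Activating Filesystem' record and an 'Activating Filesystems completed' record, per-filesystem mount latency can be read straight out of clavailability.log.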
+epprd_rg:cl_activate_fs(2.193):/oracle/EPP/sapdata3[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.193):/oracle/EPP/sapdata3[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.193):/oracle/EPP/sapdata3[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.193):/oracle/EPP/sapdata3[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.193):/oracle/EPP/sapdata3[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.193):/oracle/EPP/sapdata3[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(2.075):/oracle/EPP/sapdata3[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.9 \n\t lvname = sapdata3lv \n\t label = /oracle/EPP/sapdata3 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata3[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[activate_fs_process_group:540] fs_mount /oracle/EPP/sapdata4 fsck epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:69] FS=/oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(2.194):/oracle/EPP/sapdata4[fs_mount:86] lsfs -c /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(2.195):/oracle/EPP/sapdata4[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(2.200):/oracle/EPP/sapdata4[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(2.195):/oracle/EPP/sapdata4[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata4:/dev/sapdata4lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.200):/oracle/EPP/sapdata4[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(2.200):/oracle/EPP/sapdata4[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.201):/oracle/EPP/sapdata4[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(2.195):/oracle/EPP/sapdata4[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata4:/dev/sapdata4lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.201):/oracle/EPP/sapdata4[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(2.202):/oracle/EPP/sapdata4[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata4[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(2.204):/oracle/EPP/sapdata4[fs_mount:100] LV_name=sapdata4lv +epprd_rg:cl_activate_fs(2.204):/oracle/EPP/sapdata4[fs_mount:101] getlvcb -T -A sapdata4lv +epprd_rg:cl_activate_fs(2.205):/oracle/EPP/sapdata4[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.223):/oracle/EPP/sapdata4[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(2.205):/oracle/EPP/sapdata4[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.10 \n\t lvname = sapdata4lv \n\t label = /oracle/EPP/sapdata4 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.223):/oracle/EPP/sapdata4[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.223):/oracle/EPP/sapdata4[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.224):/oracle/EPP/sapdata4[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(2.205):/oracle/EPP/sapdata4[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.10 \n\t lvname = sapdata4lv \n\t label = /oracle/EPP/sapdata4 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.225):/oracle/EPP/sapdata4[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.226):/oracle/EPP/sapdata4[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(2.227):/oracle/EPP/sapdata4[fs_mount:115] clodmget -q 'name = sapdata4lv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(2.231):/oracle/EPP/sapdata4[fs_mount:115] CuAt_label=/oracle/EPP/sapdata4 
+epprd_rg:cl_activate_fs(2.231):/oracle/EPP/sapdata4[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/sapdata4 from /etc/filesystems +epprd_rg:cl_activate_fs(2.231):/oracle/EPP/sapdata4[fs_mount:119] : should match /oracle/EPP/sapdata4 from CuAt ODM and /oracle/EPP/sapdata4 from the LVCB +epprd_rg:cl_activate_fs(2.231):/oracle/EPP/sapdata4[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.231):/oracle/EPP/sapdata4[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(2.231):/oracle/EPP/sapdata4[fs_mount:123] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:cl_activate_fs(2.231):/oracle/EPP/sapdata4[fs_mount:128] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:cl_activate_fs(2.231):/oracle/EPP/sapdata4[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.231):/oracle/EPP/sapdata4[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.231):/oracle/EPP/sapdata4[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.252):/oracle/EPP/sapdata4[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.275):/oracle/EPP/sapdata4[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.278):/oracle/EPP/sapdata4[amlog_trace:319] 
DATE=2023-01-28T18:07:00.311560 +epprd_rg:cl_activate_fs(2.278):/oracle/EPP/sapdata4[amlog_trace:320] echo '|2023-01-28T18:07:00.311560|INFO: Activating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_activate_fs(2.278):/oracle/EPP/sapdata4[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.278):/oracle/EPP/sapdata4[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.281):/oracle/EPP/sapdata4[fs_mount:162] : Try to mount filesystem /oracle/EPP/sapdata4 at Jan 28 18:07:00.000 +epprd_rg:cl_activate_fs(2.281):/oracle/EPP/sapdata4[fs_mount:163] mount /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(2.292):/oracle/EPP/sapdata4[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.292):/oracle/EPP/sapdata4[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.292):/oracle/EPP/sapdata4[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(2.293):/oracle/EPP/sapdata4[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/sapdata4' +epprd_rg:cl_activate_fs(2.293):/oracle/EPP/sapdata4[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.293):/oracle/EPP/sapdata4[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.317):/oracle/EPP/sapdata4[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.320):/oracle/EPP/sapdata4[amlog_trace:319] DATE=2023-01-28T18:07:00.353430 +epprd_rg:cl_activate_fs(2.320):/oracle/EPP/sapdata4[amlog_trace:320] echo '|2023-01-28T18:07:00.353430|INFO: Activating Filesystems completed|/oracle/EPP/sapdata4' +epprd_rg:cl_activate_fs(2.320):/oracle/EPP/sapdata4[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.320):/oracle/EPP/sapdata4[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.320):/oracle/EPP/sapdata4[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(2.320):/oracle/EPP/sapdata4[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.320):/oracle/EPP/sapdata4[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.320):/oracle/EPP/sapdata4[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.320):/oracle/EPP/sapdata4[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.320):/oracle/EPP/sapdata4[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.320):/oracle/EPP/sapdata4[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.320):/oracle/EPP/sapdata4[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.320):/oracle/EPP/sapdata4[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.321):/oracle/EPP/sapdata4[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.322):/oracle/EPP/sapdata4[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.324):/oracle/EPP/sapdata4[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.324):/oracle/EPP/sapdata4[fs_mount:236] IFS=. 
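The version gate traced above turns the installed bos.rte.filesystem level (7.2.5.102 here) into one fixed-width integer so a single arithmetic comparison is reliable. The trace shows only the result, VRMF=702005102; a sketch assuming the value is built by concatenating the padded fields:

    typeset -li V R M F VRMF      # long integers
    typeset -Z2 R                 # zero-pad release to 2 digits
    typeset -Z3 M F               # zero-pad modification and fix to 3 digits
    # the third colon-separated field of lslpp -lcqOr is the V.R.M.F level
    lslpp -lcqOr bos.rte.filesystem | cut -f3 -d: | IFS=. read V R M F
    VRMF=${V}${R}${M}${F}         # assumed: 7.2.5.102 -> 702005102
    # the two gates traced correspond to AIX 6.1 TL7 and AIX 7.1 TL1
    if (( V == 6 && VRMF >= 601007000 )) || (( V == 7 && VRMF >= 701001000 ))
    then
        print 'mountguard can be used on this AIX level'
    fi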
+epprd_rg:cl_activate_fs(2.324):/oracle/EPP/sapdata4[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.324):/oracle/EPP/sapdata4[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.324):/oracle/EPP/sapdata4[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.324):/oracle/EPP/sapdata4[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.324):/oracle/EPP/sapdata4[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.324):/oracle/EPP/sapdata4[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(2.205):/oracle/EPP/sapdata4[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.10 \n\t lvname = sapdata4lv \n\t label = /oracle/EPP/sapdata4 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.324):/oracle/EPP/sapdata4[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.324):/sapmnt[activate_fs_process_group:527] PS4_LOOP=/sapmnt +epprd_rg:cl_activate_fs(2.324):/sapmnt[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(2.324):/sapmnt[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(2.324):/sapmnt[activate_fs_process_group:540] fs_mount /sapmnt fsck epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(2.324):/sapmnt[fs_mount:69] FS=/sapmnt +epprd_rg:cl_activate_fs(2.324):/sapmnt[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(2.324):/sapmnt[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(2.324):/sapmnt[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(2.324):/sapmnt[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(2.324):/sapmnt[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(2.324):/sapmnt[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.324):/sapmnt[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(2.324):/sapmnt[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(2.324):/sapmnt[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(2.324):/sapmnt[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(2.324):/sapmnt[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(2.324):/sapmnt[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(2.325):/sapmnt[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(2.325):/sapmnt[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(2.325):/sapmnt[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(2.325):/sapmnt[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(2.325):/sapmnt[fs_mount:81] : Here check to see if the information in /etc/filesystems for /sapmnt +epprd_rg:cl_activate_fs(2.325):/sapmnt[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(2.325):/sapmnt[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(2.325):/sapmnt[fs_mount:84] : point in /etc/filesystems. 
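The mountguard branch traced at fs_mount:244-247 is skipped on every pass in this log, because each LVCB fs attribute already carries mountguard=yes. When it does not, the attribute is set once per filesystem; the enabling command is not visible in this excerpt, so the chfs call below is an assumption, and the run-once comment in the script (the setting changes the VG timestamp) explains why it is guarded:

    # engage JFS2 mountguard after a successful mount, once per filesystem
    LVCB_info=$(getlvcb -T -A sapmntlv 2>&1)
    if [[ $LVCB_info != *mountguard=yes* ]]
    then
        # assumption: mountguard is switched on with chfs; doing so rewrites
        # the VG timestamp, hence the check so it only ever runs once
        chfs -a mountguard=yes /sapmnt
    fi

With mountguard engaged, JFS2 itself refuses a concurrent second mount of the filesystem, which is the double-mount protection the comment at fs_mount:244 describes.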
+epprd_rg:cl_activate_fs(2.325):/sapmnt[fs_mount:86] lsfs -c /sapmnt +epprd_rg:cl_activate_fs(2.326):/sapmnt[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(2.331):/sapmnt[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(2.326):/sapmnt[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/sapmnt:/dev/sapmntlv:jfs2:::20971520:rw:no:no' +epprd_rg:cl_activate_fs(2.331):/sapmnt[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(2.331):/sapmnt[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.332):/sapmnt[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(2.326):/sapmnt[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/sapmnt:/dev/sapmntlv:jfs2:::20971520:rw:no:no' +epprd_rg:cl_activate_fs(2.334):/sapmnt[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(2.336):/sapmnt[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(2.336):/sapmnt[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(2.336):/sapmnt[fs_mount:100] LV_name=sapmntlv +epprd_rg:cl_activate_fs(2.336):/sapmnt[fs_mount:101] getlvcb -T -A sapmntlv +epprd_rg:cl_activate_fs(2.337):/sapmnt[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.358):/sapmnt[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(2.337):/sapmnt[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.3 \n\t lvname = sapmntlv \n\t label = /sapmnt \n\t machine id = 44AF14B00 \n\t number lps = 20 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:41 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.358):/sapmnt[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.358):/sapmnt[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.359):/sapmnt[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(2.337):/sapmnt[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.3 \n\t lvname = sapmntlv \n\t label = /sapmnt \n\t machine id = 44AF14B00 \n\t number lps = 20 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:41 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.360):/sapmnt[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.361):/sapmnt[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(2.362):/sapmnt[fs_mount:115] clodmget -q 'name = sapmntlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(2.366):/sapmnt[fs_mount:115] CuAt_label=/sapmnt +epprd_rg:cl_activate_fs(2.366):/sapmnt[fs_mount:118] : At this point, if things are working correctly, /sapmnt from /etc/filesystems +epprd_rg:cl_activate_fs(2.366):/sapmnt[fs_mount:119] : should match /sapmnt from CuAt ODM and /sapmnt from the LVCB +epprd_rg:cl_activate_fs(2.366):/sapmnt[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.366):/sapmnt[fs_mount:121] : were done in clvaryonvg. 
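Each fs_mount pass also calls clwparroot, traced in full above and below for every filesystem, to learn whether the mount must happen inside a WPAR. In this cluster loadWparName always comes back empty, so WPAR_ROOT stays '' and every mount runs in the global environment. The decision, reduced to the clodmget query the trace shows:

    # print the WPAR root for a WPAR-enabled resource group, or nothing
    wparName=$(clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource)
    if [[ -z $wparName ]]
    then
        exit 0    # no WPAR_NAME resource: the caller sees an empty WPAR_ROOT
    fi
    # the WPAR-enabled path, not exercised in this trace, would resolve
    # and print the WPAR's root directory here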
+epprd_rg:cl_activate_fs(2.366):/sapmnt[fs_mount:123] [[ /sapmnt != /sapmnt ]] +epprd_rg:cl_activate_fs(2.366):/sapmnt[fs_mount:128] [[ /sapmnt != /sapmnt ]] +epprd_rg:cl_activate_fs(2.366):/sapmnt[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.366):/sapmnt[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.366):/sapmnt[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.386):/sapmnt[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.387):/sapmnt[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.387):/sapmnt[fs_mount:160] amlog_trace '' 'Activating Filesystem|/sapmnt' +epprd_rg:cl_activate_fs(2.387):/sapmnt[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.388):/sapmnt[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.413):/sapmnt[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.416):/sapmnt[amlog_trace:319] DATE=2023-01-28T18:07:00.449371 +epprd_rg:cl_activate_fs(2.416):/sapmnt[amlog_trace:320] echo '|2023-01-28T18:07:00.449371|INFO: Activating Filesystem|/sapmnt' +epprd_rg:cl_activate_fs(2.416):/sapmnt[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.416):/sapmnt[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.419):/sapmnt[fs_mount:162] : Try to mount filesystem /sapmnt at Jan 28 18:07:00.000 +epprd_rg:cl_activate_fs(2.419):/sapmnt[fs_mount:163] mount /sapmnt +epprd_rg:cl_activate_fs(2.430):/sapmnt[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.430):/sapmnt[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.430):/sapmnt[fs_mount:220] : if we are 
running on an AIX level that suppors it +epprd_rg:cl_activate_fs(2.430):/sapmnt[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/sapmnt' +epprd_rg:cl_activate_fs(2.430):/sapmnt[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.431):/sapmnt[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.457):/sapmnt[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.459):/sapmnt[amlog_trace:319] DATE=2023-01-28T18:07:00.492777 +epprd_rg:cl_activate_fs(2.459):/sapmnt[amlog_trace:320] echo '|2023-01-28T18:07:00.492777|INFO: Activating Filesystems completed|/sapmnt' +epprd_rg:cl_activate_fs(2.459):/sapmnt[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.460):/sapmnt[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.460):/sapmnt[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(2.460):/sapmnt[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.460):/sapmnt[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.460):/sapmnt[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.460):/sapmnt[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.460):/sapmnt[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.460):/sapmnt[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.460):/sapmnt[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.460):/sapmnt[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.461):/sapmnt[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.461):/sapmnt[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.464):/sapmnt[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.464):/sapmnt[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(2.464):/sapmnt[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.464):/sapmnt[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.464):/sapmnt[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.464):/sapmnt[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.464):/sapmnt[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.464):/sapmnt[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(2.337):/sapmnt[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.3 \n\t lvname = sapmntlv \n\t label = /sapmnt \n\t machine id = 44AF14B00 \n\t number lps = 20 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:41 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.464):/sapmnt[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.464):/usr/sap[activate_fs_process_group:527] PS4_LOOP=/usr/sap +epprd_rg:cl_activate_fs(2.464):/usr/sap[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(2.464):/usr/sap[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(2.464):/usr/sap[activate_fs_process_group:540] fs_mount /usr/sap fsck epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:69] FS=/usr/sap +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:69] typeset FS 
+epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp27263242 +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:81] : Here check to see if the information in /etc/filesystems for /usr/sap +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(2.464):/usr/sap[fs_mount:86] lsfs -c /usr/sap +epprd_rg:cl_activate_fs(2.465):/usr/sap[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(2.470):/usr/sap[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(2.465):/usr/sap[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/usr/sap:/dev/saplv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.470):/usr/sap[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(2.470):/usr/sap[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.471):/usr/sap[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(2.465):/usr/sap[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/usr/sap:/dev/saplv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.473):/usr/sap[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(2.475):/usr/sap[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(2.475):/usr/sap[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(2.475):/usr/sap[fs_mount:100] LV_name=saplv +epprd_rg:cl_activate_fs(2.475):/usr/sap[fs_mount:101] getlvcb -T -A saplv +epprd_rg:cl_activate_fs(2.476):/usr/sap[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.494):/usr/sap[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(2.477):/usr/sap[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.2 \n\t lvname = saplv \n\t label = /usr/sap \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:37 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.494):/usr/sap[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.494):/usr/sap[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.495):/usr/sap[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(2.477):/usr/sap[fs_mount:101] 
LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.2 \n\t lvname = saplv \n\t label = /usr/sap \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:37 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.496):/usr/sap[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.497):/usr/sap[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(2.498):/usr/sap[fs_mount:115] clodmget -q 'name = saplv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(2.502):/usr/sap[fs_mount:115] CuAt_label=/usr/sap +epprd_rg:cl_activate_fs(2.502):/usr/sap[fs_mount:118] : At this point, if things are working correctly, /usr/sap from /etc/filesystems +epprd_rg:cl_activate_fs(2.502):/usr/sap[fs_mount:119] : should match /usr/sap from CuAt ODM and /usr/sap from the LVCB +epprd_rg:cl_activate_fs(2.502):/usr/sap[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.502):/usr/sap[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(2.502):/usr/sap[fs_mount:123] [[ /usr/sap != /usr/sap ]] +epprd_rg:cl_activate_fs(2.502):/usr/sap[fs_mount:128] [[ /usr/sap != /usr/sap ]] +epprd_rg:cl_activate_fs(2.502):/usr/sap[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.502):/usr/sap[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.502):/usr/sap[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.522):/usr/sap[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.522):/usr/sap[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.522):/usr/sap[fs_mount:160] amlog_trace '' 'Activating Filesystem|/usr/sap' +epprd_rg:cl_activate_fs(2.522):/usr/sap[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.523):/usr/sap[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.547):/usr/sap[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.549):/usr/sap[amlog_trace:319] DATE=2023-01-28T18:07:00.582738 +epprd_rg:cl_activate_fs(2.549):/usr/sap[amlog_trace:320] echo '|2023-01-28T18:07:00.582738|INFO: Activating Filesystem|/usr/sap' +epprd_rg:cl_activate_fs(2.549):/usr/sap[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.549):/usr/sap[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.552):/usr/sap[fs_mount:162] : Try to mount filesystem /usr/sap at Jan 28 18:07:00.000 +epprd_rg:cl_activate_fs(2.552):/usr/sap[fs_mount:163] mount /usr/sap +epprd_rg:cl_activate_fs(2.564):/usr/sap[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.564):/usr/sap[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.564):/usr/sap[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(2.564):/usr/sap[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/usr/sap' +epprd_rg:cl_activate_fs(2.564):/usr/sap[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.565):/usr/sap[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.589):/usr/sap[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.592):/usr/sap[amlog_trace:319] DATE=2023-01-28T18:07:00.624961 
+epprd_rg:cl_activate_fs(2.592):/usr/sap[amlog_trace:320] echo '|2023-01-28T18:07:00.624961|INFO: Activating Filesystems completed|/usr/sap' +epprd_rg:cl_activate_fs(2.592):/usr/sap[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.592):/usr/sap[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.592):/usr/sap[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(2.592):/usr/sap[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.592):/usr/sap[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.592):/usr/sap[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.592):/usr/sap[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.592):/usr/sap[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.592):/usr/sap[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.592):/usr/sap[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.592):/usr/sap[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.593):/usr/sap[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.593):/usr/sap[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.595):/usr/sap[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.595):/usr/sap[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(2.596):/usr/sap[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.596):/usr/sap[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.596):/usr/sap[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.596):/usr/sap[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.596):/usr/sap[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.596):/usr/sap[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(2.477):/usr/sap[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.2 \n\t lvname = saplv \n\t label = /usr/sap \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:37 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.596):/usr/sap[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.596):/usr/sap[activate_fs_process_group:543] unset PS4_LOOP PS4_TIMER +epprd_rg:cl_activate_fs[activate_fs_process_group:546] : Allow any background mount operations to finish +epprd_rg:cl_activate_fs[activate_fs_process_group:548] wait +epprd_rg:cl_activate_fs[activate_fs_process_group:550] : Read cluster level Preferread read option +epprd_rg:cl_activate_fs[activate_fs_process_group:552] clodmget -n -f lvm_preferred_read HACMPcluster +epprd_rg:cl_activate_fs[activate_fs_process_group:552] cluster_pref_read=roundrobin +epprd_rg:cl_activate_fs[activate_fs_process_group:555] : Looping all file systems to update preferred read option of each lv. 
+epprd_rg:cl_activate_fs[activate_fs_process_group:556] : By referring VG level preferred_read option or cluster level Preferred read option +epprd_rg:cl_activate_fs[activate_fs_process_group:560] lsfs -c /board_org +epprd_rg:cl_activate_fs[activate_fs_process_group:560] 2>& 1 +epprd_rg:cl_activate_fs[activate_fs_process_group:560] FS_info=$'+epprd_rg:cl_activate_fs[activate_fs_process_group:560] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs[activate_fs_process_group:561] RC=0 +epprd_rg:cl_activate_fs[activate_fs_process_group:562] (( 0 != 0 )) +epprd_rg:cl_activate_fs[activate_fs_process_group:574] print -- $'+epprd_rg:cl_activate_fs[activate_fs_process_group:560] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs[activate_fs_process_group:574] tail -1 +epprd_rg:cl_activate_fs[activate_fs_process_group:574] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs[activate_fs_process_group:574] IFS=: +epprd_rg:cl_activate_fs[activate_fs_process_group:575] LV_name=boardlv +epprd_rg:cl_activate_fs[activate_fs_process_group:577] grep -w 'VOLUME GROUP' +epprd_rg:cl_activate_fs[activate_fs_process_group:577] lslv -L boardlv +epprd_rg:cl_activate_fs[activate_fs_process_group:577] LC_ALL=C +epprd_rg:cl_activate_fs[activate_fs_process_group:577] volume_group='LOGICAL VOLUME: boardlv VOLUME GROUP: datavg' +epprd_rg:cl_activate_fs[activate_fs_process_group:578] volume_group=datavg +epprd_rg:cl_activate_fs[activate_fs_process_group:579] volume_group=datavg +epprd_rg:cl_activate_fs[activate_fs_process_group:581] clodmget -n -f group -q name='VOLUME_GROUP and value=datavg' HACMPresource +epprd_rg:cl_activate_fs[activate_fs_process_group:581] RGName=epprd_rg +epprd_rg:cl_activate_fs[activate_fs_process_group:584] : Get the Preferred storage read option for this VG and perform chlv command +epprd_rg:cl_activate_fs[activate_fs_process_group:586] clodmget -n -f value -q name='LVM_PREFERRED_READ and volume_group=datavg' HACMPvolumegroup +epprd_rg:cl_activate_fs[activate_fs_process_group:586] 2> /dev/null +epprd_rg:cl_activate_fs[activate_fs_process_group:586] PreferredReadOption='' +epprd_rg:cl_activate_fs[activate_fs_process_group:587] [[ -z '' ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:589] PreferredReadOption=roundrobin +epprd_rg:cl_activate_fs[activate_fs_process_group:590] [[ -z roundrobin ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:590] [[ roundrobin == roundrobin ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:593] : Both VG level and Cluster level LVM Preferred Read option chosen as roundrobin. 
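The per-LV loop traced above resolves the preferred-read policy in two steps: a VG-level LVM_PREFERRED_READ value from HACMPvolumegroup wins, and only if that is unset does the cluster-wide lvm_preferred_read tunable apply. Here the VG-level value is empty and the cluster says roundrobin. A sketch of the resolution using the same queries as the trace; chlv -R 0 clears any preferred copy, which is how roundrobin is expressed to LVM:

    LV_name=boardlv
    # the owning VG is the last field of the lslv header line
    LC_ALL=C lslv -L $LV_name | grep -w 'VOLUME GROUP' | read s1 s2 s3 s4 s5 vg
    # VG-level setting wins ...
    pref=$(clodmget -n -f value \
        -q "name=LVM_PREFERRED_READ and volume_group=$vg" HACMPvolumegroup 2>/dev/null)
    # ... otherwise fall back to the cluster-level tunable
    [[ -z $pref ]] && pref=$(clodmget -n -f lvm_preferred_read HACMPcluster)
    [[ $pref == roundrobin ]] && chlv -R 0 $LV_name    # 0 = no preferred copy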
+epprd_rg:cl_activate_fs[activate_fs_process_group:595] chlv -R 0 boardlv +epprd_rg:cl_activate_fs[activate_fs_process_group:596] (( 0 != 0 )) +epprd_rg:cl_activate_fs[activate_fs_process_group:600] break +epprd_rg:cl_activate_fs[activate_fs_process_group:670] : Update the resource manager with the state of the operation +epprd_rg:cl_activate_fs[activate_fs_process_group:672] ALLNOERROR=All_non_error_filesystems +epprd_rg:cl_activate_fs[activate_fs_process_group:673] cl_RMupdate resource_up All_non_error_filesystems cl_activate_fs 2023-01-28T18:07:00.932509 2023-01-28T18:07:00.936859 +epprd_rg:cl_activate_fs[activate_fs_process_group:676] : And harvest any status from the background mount operations +epprd_rg:cl_activate_fs[activate_fs_process_group:678] [[ -f /tmp/epprd_rg_activate_fs.tmp27263242 ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:688] return 0 +epprd_rg:cl_activate_fs[activate_fs_process_resources:767] RC=0 +epprd_rg:cl_activate_fs[activate_fs_process_resources:768] (( 0 != 0 && 0 == 0 )) +epprd_rg:cl_activate_fs[activate_fs_process_resources:772] RG_FILE_SYSTEMS='' +epprd_rg:cl_activate_fs[activate_fs_process_resources:776] return 0 +epprd_rg:cl_activate_fs[851] STATUS=0 +epprd_rg:cl_activate_fs[873] return 0 +epprd_rg:process_resources(8.342)[process_file_systems:2648] RC=0 +epprd_rg:process_resources(8.342)[process_file_systems:2649] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources(8.342)[process_file_systems:2661] (( 0 != 0 )) +epprd_rg:process_resources(8.342)[process_file_systems:2687] return 0 +epprd_rg:process_resources(8.342)[3483] RC=0 +epprd_rg:process_resources(8.342)[3485] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources(8.342)[3324] true +epprd_rg:process_resources(8.342)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(8.342)[3328] set -a +epprd_rg:process_resources(8.342)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:07:00.950413 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(8.362)[3329] eval JOB_TYPE=SYNC_VGS ACTION=ACQUIRE VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' +epprd_rg:process_resources(8.362)[1] JOB_TYPE=SYNC_VGS +epprd_rg:process_resources(8.362)[1] ACTION=ACQUIRE +epprd_rg:process_resources(8.362)[1] VOLUME_GROUPS=datavg +epprd_rg:process_resources(8.362)[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources(8.362)[3330] RC=0 +epprd_rg:process_resources(8.362)[3331] set +a +epprd_rg:process_resources(8.362)[3333] (( 0 != 0 )) +epprd_rg:process_resources(8.362)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(8.362)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(8.362)[3343] export GROUPNAME +epprd_rg:process_resources(8.362)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(8.362)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(8.362)[3360] [[ SYNC_VGS == RELEASE ]] +epprd_rg:process_resources(8.362)[3360] [[ SYNC_VGS == ONLINE ]] +epprd_rg:process_resources(8.362)[3474] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources(8.362)[3476] sync_volume_groups +epprd_rg:process_resources(8.362)[sync_volume_groups:2699] PS4_FUNC=sync_volume_groups +epprd_rg:process_resources(8.362)[sync_volume_groups:2699] typeset PS4_FUNC +epprd_rg:process_resources(8.362)[sync_volume_groups:2700] [[ high == high ]] 
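The surrounding process_resources loop has one shape, visible every time clRGPA is traced: call rgpa so it tells us what to do next, eval its KEY=VALUE output into exported variables, and dispatch on JOB_TYPE. A reduced sketch of that loop; only the two job types seen in this excerpt are shown, and the terminal branch is an assumption:

    while true
    do
        set -a                   # auto-export everything the eval assigns
        eval $(clRGPA)           # e.g. JOB_TYPE=SYNC_VGS ACTION=ACQUIRE ...
        set +a
        case $JOB_TYPE in
            SYNC_VGS)            [[ $ACTION == ACQUIRE ]] && sync_volume_groups ;;
            EXPORT_FILESYSTEMS)  [[ $ACTION == ACQUIRE ]] && export_filesystems ;;
            *)                   break ;;  # assumption: a terminal job ends the loop
        esac
    done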
+epprd_rg:process_resources(8.362)[sync_volume_groups:2700] set -x +epprd_rg:process_resources(8.362)[sync_volume_groups:2701] STAT=0 +epprd_rg:process_resources(8.362)[sync_volume_groups:2704] export GROUPNAME +epprd_rg:process_resources(8.363)[sync_volume_groups:2706] get_list_head datavg +epprd_rg:process_resources(8.363)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(8.363)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(8.363)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(8.363)[get_list_head:60] set -x +epprd_rg:process_resources(8.364)[get_list_head:61] echo datavg +epprd_rg:process_resources(8.364)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(8.364)[get_list_head:61] IFS=: +epprd_rg:process_resources(8.365)[get_list_head:62] echo datavg +epprd_rg:process_resources(8.366)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(8.363)[sync_volume_groups:2706] read LIST_OF_VOLUME_GROUPS_FOR_RG +epprd_rg:process_resources(8.369)[sync_volume_groups:2707] get_list_tail datavg +epprd_rg:process_resources(8.370)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(8.370)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(8.370)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(8.370)[get_list_tail:68] set -x +epprd_rg:process_resources(8.371)[get_list_tail:69] echo datavg +epprd_rg:process_resources(8.371)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(8.371)[get_list_tail:69] IFS=: +epprd_rg:process_resources(8.372)[get_list_tail:70] echo +epprd_rg:process_resources(8.371)[sync_volume_groups:2707] read VOLUME_GROUPS +epprd_rg:process_resources(8.372)[sync_volume_groups:2710] : Sync the active volume groups +epprd_rg:process_resources(8.374)[sync_volume_groups:2712] lsvg -L -o +epprd_rg:process_resources(8.374)[sync_volume_groups:2712] 2> /tmp/lsvg.err +epprd_rg:process_resources(8.374)[sync_volume_groups:2712] sort +epprd_rg:process_resources(8.375)[sync_volume_groups:2712] 1> /tmp/lsvg.out.20185524 +epprd_rg:process_resources(8.382)[sync_volume_groups:2713] echo datavg +epprd_rg:process_resources(8.383)[sync_volume_groups:2713] tr ' ' '\n' +epprd_rg:process_resources(8.385)[sync_volume_groups:2714] comm -12 /tmp/lsvg.out.20185524 - +epprd_rg:process_resources(8.384)[sync_volume_groups:2714] sort +epprd_rg:process_resources(8.390)[sync_volume_groups:2718] [[ -s /tmp/lsvg.err ]] +epprd_rg:process_resources(8.390)[sync_volume_groups:2723] rm -f /tmp/lsvg.out.20185524 /tmp/lsvg.err +epprd_rg:process_resources(8.390)[sync_volume_groups:2716] cl_sync_vgs datavg +epprd_rg:process_resources(8.394)[sync_volume_groups:2732] unset AM_SYNC_CALLED_BY +epprd_rg:process_resources(8.394)[sync_volume_groups:2734] return 0 +epprd_rg:process_resources(8.395)[3324] true +epprd_rg:process_resources(8.395)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(8.395)[3328] set -a +epprd_rg:process_resources(8.395)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa +epprd_rg:cl_sync_vgs[303] version=1.24.1.4 +epprd_rg:cl_sync_vgs[306] (( 1 == 0 )) +epprd_rg:cl_sync_vgs[312] : syncing 4 stale PPs at a time seems to be a win most of the time, but +epprd_rg:cl_sync_vgs[313] : we honor the NUM_PARALLEL_LPS value from /etc/environment, as does +epprd_rg:cl_sync_vgs[314] : 
syncvg. +epprd_rg:cl_sync_vgs[316] syncflag='' +epprd_rg:cl_sync_vgs[316] export syncflag +epprd_rg:cl_sync_vgs[317] PS4_LOOP='' +epprd_rg:cl_sync_vgs[317] export PS4_LOOP +epprd_rg:cl_sync_vgs[318] typeset -i npl +epprd_rg:cl_sync_vgs[319] grep -q ^NUM_PARALLEL_LPS= /etc/environment 2023-01-28T18:07:01.003220 clrgpa +epprd_rg:cl_sync_vgs[321] syncflag=-P4 +epprd_rg:cl_sync_vgs[328] echo 'NOTE: While the sync is going on, volume group can be used' +epprd_rg:clRGPA[+55] exit 0 NOTE: While the sync is going on, volume group can be used +epprd_rg:cl_sync_vgs[331] : For GLVM volume groups, read PARALLEL LPS count from HACMPresource if it is set from GUI, +epprd_rg:cl_sync_vgs[332] : else, read from environment variables, if it is not set use 32 as default value. +epprd_rg:cl_sync_vgs[334] clodmget -q name='GMVG_REP_RESOURCE and value=datavg' -f group HACMPresource +epprd_rg:process_resources(8.408)[3329] eval JOB_TYPE=EXPORT_FILESYSTEMS ACTION=ACQUIRE EXPORT_FILE_SYSTEMS='"/board_org,/sapmnt/EPP"' EXPORT_FILE_SYSTEMS_V4='""' RESOURCE_GROUPS='"epprd_rg' '"' STABLE_STORAGE_PATH='""' IP_LABELS='"epprd:epprda:epprds"' DAEMONS='"NFS' 'RPCLOCKD"' +epprd_rg:process_resources(8.408)[1] JOB_TYPE=EXPORT_FILESYSTEMS +epprd_rg:process_resources(8.408)[1] ACTION=ACQUIRE +epprd_rg:process_resources(8.408)[1] EXPORT_FILE_SYSTEMS=/board_org,/sapmnt/EPP +epprd_rg:process_resources(8.408)[1] EXPORT_FILE_SYSTEMS_V4='' +epprd_rg:process_resources(8.408)[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources(8.408)[1] STABLE_STORAGE_PATH='' +epprd_rg:process_resources(8.408)[1] IP_LABELS=epprd:epprda:epprds +epprd_rg:process_resources(8.408)[1] DAEMONS='NFS RPCLOCKD' +epprd_rg:process_resources(8.408)[3330] RC=0 +epprd_rg:process_resources(8.408)[3331] set +a +epprd_rg:process_resources(8.408)[3333] (( 0 != 0 )) +epprd_rg:process_resources(8.408)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(8.408)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(8.408)[3343] export GROUPNAME +epprd_rg:process_resources(8.408)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(8.408)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(8.408)[3360] [[ EXPORT_FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources(8.408)[3360] [[ EXPORT_FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources(8.408)[3595] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources(8.408)[3597] export_filesystems +epprd_rg:process_resources(8.408)[export_filesystems:1621] PS4_FUNC=export_filesystems +epprd_rg:process_resources(8.408)[export_filesystems:1621] typeset PS4_FUNC +epprd_rg:process_resources(8.408)[export_filesystems:1622] [[ high == high ]] +epprd_rg:process_resources(8.408)[export_filesystems:1622] set -x +epprd_rg:process_resources(8.408)[export_filesystems:1623] STAT=0 +epprd_rg:process_resources(8.408)[export_filesystems:1624] NFSSTOPPED=0 +epprd_rg:process_resources(8.408)[export_filesystems:1629] [[ NFS == RPCLOCKD ]] +epprd_rg:process_resources(8.408)[export_filesystems:1629] [[ RPCLOCKD == RPCLOCKD ]] +epprd_rg:process_resources(8.408)[export_filesystems:1631] stopsrc -s rpc.lockd +epprd_rg:cl_sync_vgs[334] 2> /dev/null +epprd_rg:cl_sync_vgs[334] glvm_rg='' +epprd_rg:cl_sync_vgs[335] [[ -n '' ]] +epprd_rg:cl_sync_vgs[353] check_sync datavg +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:76] typeset vg_name +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:77] typeset vgid +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:78] typeset disklist +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:79] typeset lv_name 
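cl_sync_vgs, just entered above, picks a syncvg parallelism and then inspects the VG before trying to repair anything: NUM_PARALLEL_LPS from /etc/environment is honored if set, otherwise 4 stale PPs are synced at a time, and disks that LVM has marked missing are looked for first, since no sync can succeed through an inaccessible disk. A sketch under those assumptions; the final backgrounded syncvg call is not visible in this excerpt and is assumed:

    typeset -i npl
    if grep -q '^NUM_PARALLEL_LPS=' /etc/environment
    then
        npl=$(grep '^NUM_PARALLEL_LPS=' /etc/environment | cut -d= -f2)
        syncflag=-P$npl           # honor the administrator's value, as syncvg does
    else
        syncflag=-P4              # default: sync 4 stale PPs at a time
    fi
    vgid=$(getlvodm -v datavg)    # VG name -> VGID, as traced at check_sync:89
    # disks the LVM driver reports missing cannot be synced through
    LC_ALL=C lsvg -L -p datavg | grep -w missing && print 'datavg has missing disks'
    syncvg $syncflag -v datavg &  # assumed: background sync; the VG stays usable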
+epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:80] typeset -li stale_count +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:81] typeset -li mode +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:82] RC=0 +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:82] typeset -li RC +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:83] typeset site_node_list +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:84] typeset site_choice +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:86] vg_name=datavg +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:87] disklist='' +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:89] getlvodm -v datavg +epprd_rg:cl_sync_vgs(0.020):datavg[check_sync:89] vgid=00c44af100004b00000001851e9dc053 +epprd_rg:cl_sync_vgs(0.020):datavg[check_sync:92] : find disks in the VG that LVM thinks are inaccessable +epprd_rg:cl_sync_vgs(0.020):datavg[check_sync:94] lsvg -L -p datavg +epprd_rg:cl_sync_vgs(0.021):datavg[check_sync:94] LC_ALL=C 0513-044 The rpc.lockd Subsystem was requested to stop. +epprd_rg:process_resources(8.419)[export_filesystems:1633] touch /tmp/.RPCLOCKDSTOPPED +epprd_rg:process_resources(8.422)[export_filesystems:1638] : For NFSv4, cl_export_fs will use STABLE_STORAGE_PATH, which is set by +epprd_rg:process_resources(8.422)[export_filesystems:1639] : clRGPA and can have colon-separated values for multiple RGs. +epprd_rg:process_resources(8.422)[export_filesystems:1640] : We will save off clRGPA values in stable_storage_path and then extract +epprd_rg:process_resources(8.422)[export_filesystems:1641] : each RG into STABLE_STORAGE_PATH for cl_unexport_fs. +epprd_rg:process_resources(8.422)[export_filesystems:1643] stable_storage_path='' +epprd_rg:process_resources(8.422)[export_filesystems:1643] typeset stable_storage_path +epprd_rg:process_resources(8.422)[export_filesystems:1645] export NFSSTOPPED +epprd_rg:process_resources(8.422)[export_filesystems:1650] export GROUPNAME +epprd_rg:process_resources(8.424)[export_filesystems:1652] get_list_head /board_org,/sapmnt/EPP +epprd_rg:process_resources(8.424)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(8.424)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(8.424)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(8.424)[get_list_head:60] set -x +epprd_rg:process_resources(8.425)[get_list_head:61] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources(8.426)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(8.426)[get_list_head:61] IFS=: +epprd_rg:process_resources(8.427)[get_list_head:62] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources(8.428)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(8.427)[export_filesystems:1652] read LIST_OF_EXPORT_FILE_SYSTEMS_FOR_RG +epprd_rg:process_resources(8.431)[export_filesystems:1653] get_list_tail /board_org,/sapmnt/EPP +epprd_rg:process_resources(8.432)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(8.432)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(8.432)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(8.432)[get_list_tail:68] set -x +epprd_rg:process_resources(8.433)[get_list_tail:69] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources(8.434)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(8.434)[get_list_tail:69] IFS=: +epprd_rg:process_resources(8.434)[get_list_tail:70] echo +epprd_rg:process_resources(8.435)[export_filesystems:1653] read EXPORT_FILE_SYSTEMS +epprd_rg:process_resources(8.437)[export_filesystems:1654] get_list_head 
+epprd_rg:process_resources(8.437)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(8.437)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(8.437)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(8.437)[get_list_head:60] set -x +epprd_rg:process_resources(8.438)[get_list_head:61] echo +epprd_rg:process_resources(8.439)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(8.439)[get_list_head:61] IFS=: +epprd_rg:process_resources(8.440)[get_list_head:62] echo +epprd_rg:process_resources(8.440)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(8.440)[export_filesystems:1654] read LIST_OF_EXPORT_FILE_SYSTEMS_V4_FOR_RG +epprd_rg:process_resources(8.444)[export_filesystems:1655] get_list_tail +epprd_rg:process_resources(8.445)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(8.445)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(8.445)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(8.445)[get_list_tail:68] set -x +epprd_rg:process_resources(8.446)[get_list_tail:69] echo +epprd_rg:process_resources(8.446)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(8.446)[get_list_tail:69] IFS=: +epprd_rg:process_resources(8.446)[get_list_tail:70] echo +epprd_rg:process_resources(8.445)[export_filesystems:1655] read EXPORT_FILE_SYSTEMS_V4 +epprd_rg:process_resources(8.449)[export_filesystems:1656] get_list_head +epprd_rg:process_resources(8.449)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(8.449)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(8.449)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(8.449)[get_list_head:60] set -x +epprd_rg:process_resources(8.450)[get_list_head:61] echo +epprd_rg:process_resources(8.450)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(8.450)[get_list_head:61] IFS=: +epprd_rg:process_resources(8.451)[get_list_head:62] echo +epprd_rg:process_resources(8.452)[get_list_head:62] tr , ' ' +epprd_rg:cl_sync_vgs(0.062):datavg[check_sync:94] disklist=$'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:process_resources(8.449)[export_filesystems:1656] read STABLE_STORAGE_PATH +epprd_rg:cl_sync_vgs(0.063):datavg[check_sync:95] print -- $'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:process_resources(8.457)[export_filesystems:1657] get_list_tail +epprd_rg:process_resources(8.457)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(8.457)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(8.457)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(8.458)[get_list_tail:68] set -x +epprd_rg:process_resources(8.459)[get_list_tail:69] echo +epprd_rg:cl_sync_vgs(0.065):datavg[check_sync:95] grep -w missing +epprd_rg:process_resources(8.460)[get_list_tail:69] read listhead listtail 
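[Editor's note] The paired echo/read/tr fragments running through this stretch are two tiny list helpers. clRGPA hands process_resources colon-separated per-resource-group lists whose items are comma-separated; get_list_head yields the first RG's items as a space-separated list, get_list_tail the remainder. A hedged ksh93 sketch (the real helpers also set PS4_FUNC for tracing; `| read` works here because ksh93 runs the last pipeline element in the current shell):

    get_list_head () {
        # First colon-delimited field, commas turned into spaces.
        echo "$*" | IFS=: read listhead listtail
        echo "$listhead" | tr ',' ' '
    }
    get_list_tail () {
        # Everything after the first colon: the remaining RGs' items.
        echo "$*" | IFS=: read listhead listtail
        echo "$listtail"
    }
    # Usage as traced above:
    get_list_head "datavg" | read LIST_OF_VOLUME_GROUPS_FOR_RG
    get_list_tail "datavg" | read VOLUME_GROUPS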
+epprd_rg:process_resources(8.460)[get_list_tail:69] IFS=: +epprd_rg:process_resources(8.460)[get_list_tail:70] echo +epprd_rg:cl_sync_vgs(0.067):datavg[check_sync:95] cut -f1 '-d ' +epprd_rg:process_resources(8.462)[export_filesystems:1657] read stable_storage_path +epprd_rg:process_resources(8.462)[export_filesystems:1659] cl_export_fs epprd:epprda:epprds '/board_org /sapmnt/EPP' '' +epprd_rg:cl_sync_vgs(0.070):datavg[check_sync:95] missing_disklist='' +epprd_rg:cl_sync_vgs(0.071):datavg[check_sync:96] print -- $'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.073):datavg[check_sync:96] grep -w removed +epprd_rg:cl_sync_vgs(0.075):datavg[check_sync:96] cut -f1 '-d ' +epprd_rg:cl_export_fs[102] version=%I% +epprd_rg:cl_export_fs[105] . /usr/es/sbin/cluster/events/utils/cl_nfs_utils +epprd_rg:cl_export_fs[98] PROGNAME=cl_export_fs +epprd_rg:cl_export_fs[99] [[ high == high ]] +epprd_rg:cl_export_fs[101] set -x +epprd_rg:cl_export_fs[102] version=%I +epprd_rg:cl_export_fs[105] cl_exports_data='' +epprd_rg:cl_export_fs[105] typeset cl_exports_data +epprd_rg:cl_export_fs[106] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[107] HOST=epprd:epprda:epprds +epprd_rg:cl_export_fs[108] EXPORT_V3='/board_org /sapmnt/EPP' +epprd_rg:cl_export_fs[109] EXPORT_V4='' +epprd_rg:cl_export_fs[111] STATUS=0 +epprd_rg:cl_export_fs[113] LIMIT=60 +epprd_rg:cl_export_fs[113] WAIT=1 +epprd_rg:cl_export_fs[113] TRY=0 +epprd_rg:cl_export_fs[113] typeset -li LIMIT WAIT TRY +epprd_rg:cl_export_fs[115] PROC_RES=false +epprd_rg:cl_export_fs[118] : If JOB_TYPE is set, and it does not equal to GROUP, then +epprd_rg:cl_export_fs[119] : we are processing for process_resources +epprd_rg:cl_export_fs[121] [[ EXPORT_FILESYSTEMS != 0 ]] +epprd_rg:cl_export_fs[121] [[ EXPORT_FILESYSTEMS != GROUP ]] +epprd_rg:cl_export_fs[122] PROC_RES=true +epprd_rg:cl_export_fs[125] set -u +epprd_rg:cl_export_fs[127] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[129] (( 3 < 2 || 3 > 3 )) +epprd_rg:cl_export_fs[142] DARE_EVENT=reconfig_resource_acquire +epprd_rg:cl_export_fs[145] : Check memory to see if NFSv4 exports have been configured. +epprd_rg:cl_export_fs[147] export_v4='' +epprd_rg:cl_export_fs[148] [[ -z '' ]] +epprd_rg:cl_export_fs[148] [[ rg_move == reconfig_resource_acquire ]] +epprd_rg:cl_export_fs[158] : If we do not have NFSv4 exports configured, then determine +epprd_rg:cl_export_fs[159] : the protocol versions from the HACMP exports file. +epprd_rg:cl_export_fs[161] [[ -z '' ]] +epprd_rg:cl_export_fs[161] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[163] export_v3='' +epprd_rg:cl_export_fs[169] : Get the export file for the filesystem from the exports file. +epprd_rg:cl_export_fs[170] : Only look at the part of the line preceding comments. 
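[Editor's note] The JOB_TYPE test at cl_export_fs[121] above is how these utility scripts detect that they were driven by process_resources rather than invoked per-group. A hedged sketch of the guard as traced:

    PROC_RES=false
    # JOB_TYPE is inherited from process_resources' set -a environment;
    # anything other than 0 or GROUP means the resource-driven path.
    if [[ ${JOB_TYPE:-0} != 0 && ${JOB_TYPE:-0} != GROUP ]] ; then
        PROC_RES=true
    fi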
+epprd_rg:cl_export_fs[173] getline_exports /board_org +epprd_rg:cl_export_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_export_fs[getline_exports:45] line='' +epprd_rg:cl_export_fs[getline_exports:45] typeset line +epprd_rg:cl_export_fs[getline_exports:46] flag=0 +epprd_rg:cl_export_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_export_fs[getline_exports:47] fs=/board_org +epprd_rg:cl_export_fs[getline_exports:49] [[ -z /board_org ]] +epprd_rg:cl_export_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_sync_vgs(0.078):datavg[check_sync:96] removed_disklist='' +epprd_rg:cl_sync_vgs(0.078):datavg[check_sync:100] : Proceeed if there are some disks that LVM thinks are inaccessable +epprd_rg:cl_sync_vgs(0.078):datavg[check_sync:102] [[ -n '' ]] +epprd_rg:cl_sync_vgs(0.078):datavg[check_sync:102] [[ -n '' ]] +epprd_rg:cl_sync_vgs(0.078):datavg[check_sync:196] : sync if any LVs in the VG that have stale partitions +epprd_rg:cl_sync_vgs(0.078):datavg[check_sync:198] (( 0 == 0 )) +epprd_rg:cl_sync_vgs(0.078):datavg[check_sync:201] : A status of 2,3,5 or 7 indicates the presence of dirty and/or stale partitions +epprd_rg:cl_sync_vgs(0.078):datavg[check_sync:213] is_start_logged=0 +epprd_rg:cl_sync_vgs(0.078):datavg[check_sync:218] at_least_one_sync_success=0 +epprd_rg:cl_export_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_sync_vgs(0.079):datavg[check_sync:219] lqueryvg -g 00c44af100004b00000001851e9dc053 -L +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_export_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_export_fs[getline_exports:69] (( 1 == 0 )) +epprd_rg:cl_export_fs[getline_exports:74] [[ 0 == 1 ]] +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:89] return 0 +epprd_rg:cl_export_fs[174] export_line='' +epprd_rg:cl_export_fs[177] : The line is of the format: filesystem -option1,option2,... +epprd_rg:cl_export_fs[178] : This will give 'option1 option2 ...' +epprd_rg:cl_export_fs[180] echo +epprd_rg:cl_export_fs[180] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_export_fs[181] tr , ' ' +epprd_rg:cl_export_fs[180] cut -d- -f2- +epprd_rg:cl_export_fs[180] options='' +epprd_rg:cl_export_fs[184] : Each option can be of the format name=value, or just name. +epprd_rg:cl_export_fs[185] : We only care about the vers option. +epprd_rg:cl_export_fs[189] : Have we seen the vers 'option?' +epprd_rg:cl_export_fs[191] vers_missing=1 +epprd_rg:cl_export_fs[194] : Loop through all of the export options for this export. +epprd_rg:cl_export_fs[218] : If we did not find the vers option, then NFSv3 is the default. +epprd_rg:cl_export_fs[220] (( vers_missing )) +epprd_rg:cl_export_fs[220] export_v3=' /board_org' +epprd_rg:cl_export_fs[169] : Get the export file for the filesystem from the exports file. +epprd_rg:cl_export_fs[170] : Only look at the part of the line preceding comments. 
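[Editor's note] Interleaved with the export processing, check_sync has just finished scanning datavg for disks LVM considers inaccessible, before the getline_exports call that follows. A hedged reconstruction of that scan; the commands and the odd `cut -f1 '-d '` delimiter are verbatim from the trace above:

    # Find PVs the LVM layer reports as missing or removed.
    vgid=$(getlvodm -v $vg_name)
    disklist=$(LC_ALL=C lsvg -L -p $vg_name)
    missing_disklist=$(print -- "$disklist" | grep -w missing | cut -f1 '-d ')
    removed_disklist=$(print -- "$disklist" | grep -w removed | cut -f1 '-d ')
    if [[ -n $missing_disklist || -n $removed_disklist ]] ; then
        :   # per-disk recovery path; both lists were empty in this run,
            # so the function fell through to the stale-partition check
    fi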
+epprd_rg:cl_export_fs[173] getline_exports /sapmnt/EPP +epprd_rg:cl_export_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_export_fs[getline_exports:45] line='' +epprd_rg:cl_export_fs[getline_exports:45] typeset line +epprd_rg:cl_export_fs[getline_exports:46] flag=0 +epprd_rg:cl_export_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_export_fs[getline_exports:47] fs=/sapmnt/EPP +epprd_rg:cl_export_fs[getline_exports:49] [[ -z /sapmnt/EPP ]] +epprd_rg:cl_export_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_export_fs[getline_exports:68] grep -q '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_export_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_export_fs[getline_exports:71] flag=1 +epprd_rg:cl_export_fs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_export_fs[getline_exports:76] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:76] grep -w '\\' +epprd_rg:cl_sync_vgs(0.081):datavg[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.101):datavg[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.101):datavg.epprdaloglv[check_sync:221] PS4_LOOP=datavg.epprdaloglv +epprd_rg:cl_sync_vgs(0.102):datavg.epprdaloglv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.102):datavg.epprdaloglv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.102):datavg.epprdaloglv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.102):datavg.epprdaloglv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.102):datavg.epprdaloglv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.102):datavg.saplv[check_sync:221] PS4_LOOP=datavg.saplv +epprd_rg:cl_sync_vgs(0.102):datavg.saplv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.102):datavg.saplv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.102):datavg.saplv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.102):datavg.saplv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.102):datavg.saplv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.102):datavg.sapmntlv[check_sync:221] PS4_LOOP=datavg.sapmntlv +epprd_rg:cl_sync_vgs(0.102):datavg.sapmntlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.102):datavg.sapmntlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.102):datavg.sapmntlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.102):datavg.sapmntlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.102):datavg.sapmntlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.102):datavg.oraclelv[check_sync:221] PS4_LOOP=datavg.oraclelv +epprd_rg:cl_sync_vgs(0.102):datavg.oraclelv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) 
+epprd_rg:cl_sync_vgs(0.102):datavg.oraclelv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.102):datavg.oraclelv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.102):datavg.oraclelv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.102):datavg.oraclelv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.102):datavg.epplv[check_sync:221] PS4_LOOP=datavg.epplv +epprd_rg:cl_sync_vgs(0.102):datavg.epplv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.102):datavg.epplv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.102):datavg.epplv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.102):datavg.epplv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.102):datavg.epplv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.102):datavg.oraarchlv[check_sync:221] PS4_LOOP=datavg.oraarchlv +epprd_rg:cl_sync_vgs(0.102):datavg.oraarchlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.102):datavg.oraarchlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.102):datavg.oraarchlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.102):datavg.oraarchlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.102):datavg.oraarchlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata1lv[check_sync:221] PS4_LOOP=datavg.sapdata1lv +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata1lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata1lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata1lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata1lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata1lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata2lv[check_sync:221] PS4_LOOP=datavg.sapdata2lv +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata2lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata2lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata2lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata2lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata2lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata3lv[check_sync:221] PS4_LOOP=datavg.sapdata3lv +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata3lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata3lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata3lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata3lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata3lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata4lv[check_sync:221] PS4_LOOP=datavg.sapdata4lv +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata4lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata4lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata4lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.102):datavg.sapdata4lv[check_sync:219] read lv_id lv_name lv_status 
+epprd_rg:cl_sync_vgs(0.102):datavg.sapdata4lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.102):datavg.boardlv[check_sync:221] PS4_LOOP=datavg.boardlv +epprd_rg:cl_sync_vgs(0.102):datavg.boardlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.102):datavg.boardlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.102):datavg.boardlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.102):datavg.boardlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.102):datavg.boardlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.102):datavg.origlogAlv[check_sync:221] PS4_LOOP=datavg.origlogAlv +epprd_rg:cl_sync_vgs(0.102):datavg.origlogAlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.102):datavg.origlogAlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.102):datavg.origlogAlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.102):datavg.origlogAlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.102):datavg.origlogAlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.102):datavg.origlogBlv[check_sync:221] PS4_LOOP=datavg.origlogBlv +epprd_rg:cl_sync_vgs(0.102):datavg.origlogBlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.102):datavg.origlogBlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.102):datavg.origlogBlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.102):datavg.origlogBlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.102):datavg.origlogBlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.102):datavg.mirrlogAlv[check_sync:221] PS4_LOOP=datavg.mirrlogAlv +epprd_rg:cl_sync_vgs(0.102):datavg.mirrlogAlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.102):datavg.mirrlogAlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.102):datavg.mirrlogAlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.102):datavg.mirrlogAlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.102):datavg.mirrlogAlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.102):datavg.mirrlogBlv[check_sync:221] PS4_LOOP=datavg.mirrlogBlv +epprd_rg:cl_sync_vgs(0.103):datavg.mirrlogBlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.103):datavg.mirrlogBlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.103):datavg.mirrlogBlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.103):datavg.mirrlogBlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.103):datavg.mirrlogBlv[check_sync:268] [[ -n RG_MOVE ]] +epprd_rg:cl_sync_vgs(0.103):datavg.mirrlogBlv[check_sync:268] (( 0 == 1 )) +epprd_rg:cl_sync_vgs[355] exit 0 +epprd_rg:cl_export_fs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_export_fs[getline_exports:81] cl_exports_data=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:82] break +epprd_rg:cl_export_fs[getline_exports:89] return 0 +epprd_rg:cl_export_fs[174] export_line=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[177] : The line is of the format: filesystem -option1,option2,... +epprd_rg:cl_export_fs[178] : This will give 'option1 option2 ...' 
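[Editor's note] The long run of check_sync:219-227 iterations above is one loop over lqueryvg output. Per the in-script comments, an LV status of 2, 3, 5 or 7 flags dirty and/or stale partitions; every datavg LV reported status 1, so cl_sync_vgs exited without syncing anything. Hedged sketch; the syncvg invocation is assumed, since this run never reached it:

    at_least_one_sync_success=0
    lqueryvg -g $vgid -L | while read lv_id lv_name lv_status ; do
        # Statuses other than 2, 3, 5, 7 mean no stale partitions: skip.
        (( lv_status != 2 && lv_status != 3 && lv_status != 5 && lv_status != 7 )) &&
            continue
        syncvg $syncflag -l $lv_name && at_least_one_sync_success=1   # assumed call
    done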
+epprd_rg:cl_export_fs[180] echo /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[180] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_export_fs[180] cut -d- -f2- +epprd_rg:cl_export_fs[181] tr , ' ' +epprd_rg:cl_export_fs[180] options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap root=epprdap ' +epprd_rg:cl_export_fs[184] : Each option can be of the format name=value, or just name. +epprd_rg:cl_export_fs[185] : We only care about the vers option. +epprd_rg:cl_export_fs[189] : Have we seen the vers 'option?' +epprd_rg:cl_export_fs[191] vers_missing=1 +epprd_rg:cl_export_fs[194] : Loop through all of the export options for this export. +epprd_rg:cl_export_fs[218] : If we did not find the vers option, then NFSv3 is the default. +epprd_rg:cl_export_fs[220] (( vers_missing )) +epprd_rg:cl_export_fs[220] export_v3=' /board_org /sapmnt/EPP' +epprd_rg:cl_export_fs[223] EXPORT_V3=' /board_org /sapmnt/EPP' +epprd_rg:cl_export_fs[224] EXPORT_V4='' +epprd_rg:cl_export_fs[227] /usr/sbin/bootinfo -K +epprd_rg:cl_export_fs[227] KERNEL_BITS=64 +epprd_rg:cl_export_fs[229] subsystems='nfsd rpc.mountd' +epprd_rg:cl_export_fs[230] [[ -n '' ]] +epprd_rg:cl_export_fs[233] : Special processing for cross mounts of EFS keys +epprd_rg:cl_export_fs[234] : The overmount of /var/efs must be removed prior +epprd_rg:cl_export_fs[235] : to stopping or restarting NFS, since the SRC +epprd_rg:cl_export_fs[236] : operations will attempt to check the EFS enablement. +epprd_rg:cl_export_fs[238] grep -w /var/efs +epprd_rg:cl_export_fs[238] mount +epprd_rg:cl_export_fs[238] mounted_info='' +epprd_rg:cl_export_fs[239] [[ -n '' ]] +epprd_rg:cl_export_fs[295] : Kill and restart everything in '"nfsd' 'rpc.mountd"' +epprd_rg:cl_export_fs[299] : Kill nfsd, and restart it below +epprd_rg:cl_export_fs[306] [[ nfsd == nfsd ]] +epprd_rg:cl_export_fs[307] [[ 64 == 64 ]] +epprd_rg:cl_export_fs[307] [[ -x /usr/sbin/nfs4smctl ]] +epprd_rg:cl_export_fs[308] [[ ! 
-s /etc/xtab ]] +epprd_rg:cl_export_fs[311] clcheck_server nfsd +epprd_rg:clcheck_server[118] [[ high == high ]] +epprd_rg:clcheck_server[118] version=1.10.4.2 +epprd_rg:clcheck_server[119] cl_get_path +epprd_rg:clcheck_server[119] HA_DIR=es +epprd_rg:clcheck_server[121] SERVER=nfsd +epprd_rg:clcheck_server[122] STATUS=0 +epprd_rg:clcheck_server[123] FATAL_ERROR=255 +epprd_rg:clcheck_server[124] retries=0 +epprd_rg:clcheck_server[124] typeset -li retries +epprd_rg:clcheck_server[126] [[ -n nfsd ]] +epprd_rg:clcheck_server[131] lssrc -s nfsd +epprd_rg:clcheck_server[131] LC_ALL=C +epprd_rg:clcheck_server[131] grep 'not on file' +epprd_rg:clcheck_server[131] wc -l +epprd_rg:clcheck_server[131] rc=' 0' +epprd_rg:clcheck_server[133] (( 0 == 1 )) +epprd_rg:clcheck_server[143] [[ 0 =~ 3 ]] +epprd_rg:clcheck_server[147] lssrc -s nfsd +epprd_rg:clcheck_server[147] 1> /dev/null 2> /dev/null +epprd_rg:clcheck_server[161] egrep 'stop|active' +epprd_rg:clcheck_server[161] lssrc -s nfsd +epprd_rg:clcheck_server[161] LC_ALL=C +epprd_rg:clcheck_server[161] check_if_down=' nfsd nfs 28377402 active' +epprd_rg:clcheck_server[166] [[ -z ' nfsd nfs 28377402 active' ]] +epprd_rg:clcheck_server[187] check_server_extended nfsd +epprd_rg:clcheck_server[check_server_extended:55] [[ high == high ]] +epprd_rg:clcheck_server[check_server_extended:55] set -x +epprd_rg:clcheck_server[check_server_extended:58] SERVER=nfsd +epprd_rg:clcheck_server[check_server_extended:58] typeset SERVER +epprd_rg:clcheck_server[check_server_extended:59] STATUS=1 +epprd_rg:clcheck_server[check_server_extended:59] typeset STATUS +epprd_rg:clcheck_server[check_server_extended:87] echo 1 +epprd_rg:clcheck_server[check_server_extended:88] return +epprd_rg:clcheck_server[187] STATUS=1 +epprd_rg:clcheck_server[188] return 1 +epprd_rg:cl_export_fs[329] : nfsv4 daemon not stopped due to existing mounts +epprd_rg:cl_export_fs[330] : Turn on NFSv4 grace periods and ignore any errors. +epprd_rg:cl_export_fs[332] chnfs -I -g on -x 1 +epprd_rg:cl_export_fs[332] ODMDIR=/etc/objrepos 0513-077 Subsystem has been changed. 0513-077 Subsystem has been changed. +epprd_rg:cl_export_fs[299] : Kill rpc.mountd, and restart it below +epprd_rg:cl_export_fs[306] [[ rpc.mountd == nfsd ]] +epprd_rg:cl_export_fs[336] : Friendly stop of rpc.mountd +epprd_rg:cl_export_fs[338] lssrc -s rpc.mountd +epprd_rg:cl_export_fs[338] LC_ALL=C +epprd_rg:cl_export_fs[338] tail +2 +epprd_rg:cl_export_fs[338] grep -qw active +epprd_rg:cl_export_fs[338] stopsrc -s rpc.mountd 0513-044 The rpc.mountd Subsystem was requested to stop. 
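[Editor's note] clcheck_server, traced twice in this stretch, is essentially an SRC liveness probe: 'not on file' in the lssrc output means the subsystem does not exist, an empty 'stop|active' grep means it is inoperative, and a one-second retry covers transition states. A hedged condensation, with the retry ceiling and check_server_extended omitted; note the return convention inferred from the trace, where nfsd (active) yielded 1 and rpc.mountd (stopped) yielded 0:

    check_server () {
        typeset SERVER=$1
        # Unknown subsystem is a fatal error (255 in the real script).
        LC_ALL=C lssrc -s $SERVER | grep -q 'not on file' && return 255
        # Empty 'stop|active' output means inoperative; retry once to be sure.
        typeset state=$(LC_ALL=C lssrc -s $SERVER | egrep 'stop|active')
        if [[ -z $state ]] ; then
            sleep 1
            state=$(LC_ALL=C lssrc -s $SERVER | egrep 'stop|active')
            [[ -z $state ]] && return 0     # down, as rpc.mountd was above
        fi
        return 1                            # still running, as nfsd was above
    }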
+epprd_rg:cl_export_fs[341] : Now, wait for rpc.mountd to die +epprd_rg:cl_export_fs[343] (( TRY=0)) +epprd_rg:cl_export_fs[343] (( 0 < 60)) +epprd_rg:cl_export_fs[345] tail +2 +epprd_rg:cl_export_fs[345] lssrc -s rpc.mountd +epprd_rg:cl_export_fs[345] LC_ALL=C +epprd_rg:cl_export_fs[345] subsys_state=' rpc.mountd nfs inoperative' +epprd_rg:cl_export_fs[346] print -- ' rpc.mountd nfs inoperative' +epprd_rg:cl_export_fs[346] grep -qw inoperative +epprd_rg:cl_export_fs[348] [[ high == high ]] +epprd_rg:cl_export_fs[348] set -x +epprd_rg:cl_export_fs[349] subsys_state=inoperative +epprd_rg:cl_export_fs[350] break +epprd_rg:cl_export_fs[356] [[ high == high ]] +epprd_rg:cl_export_fs[356] set -x +epprd_rg:cl_export_fs[358] [[ inoperative != inoperative ]] +epprd_rg:cl_export_fs[382] : If stopsrc has failed to stop rpc.mountd, +epprd_rg:cl_export_fs[383] : use a real kill on the daemon +epprd_rg:cl_export_fs[385] ps -eo comm,pid +epprd_rg:cl_export_fs[385] grep -w rpc.mountd +epprd_rg:cl_export_fs[385] grep -vw grep +epprd_rg:cl_export_fs[385] read skip subsys_pid rest +epprd_rg:cl_export_fs[386] [[ '' == +([0-9]) ]] +epprd_rg:cl_export_fs[389] : If rpc.mountd has been stopped, +epprd_rg:cl_export_fs[390] : start it back up again. +epprd_rg:cl_export_fs[392] clcheck_server rpc.mountd +epprd_rg:clcheck_server[118] [[ high == high ]] +epprd_rg:clcheck_server[118] version=1.10.4.2 +epprd_rg:clcheck_server[119] cl_get_path +epprd_rg:clcheck_server[119] HA_DIR=es +epprd_rg:clcheck_server[121] SERVER=rpc.mountd +epprd_rg:clcheck_server[122] STATUS=0 +epprd_rg:clcheck_server[123] FATAL_ERROR=255 +epprd_rg:clcheck_server[124] retries=0 +epprd_rg:clcheck_server[124] typeset -li retries +epprd_rg:clcheck_server[126] [[ -n rpc.mountd ]] +epprd_rg:clcheck_server[131] lssrc -s rpc.mountd +epprd_rg:clcheck_server[131] LC_ALL=C +epprd_rg:clcheck_server[131] grep 'not on file' +epprd_rg:clcheck_server[131] wc -l +epprd_rg:clcheck_server[131] rc=' 0' +epprd_rg:clcheck_server[133] (( 0 == 1 )) +epprd_rg:clcheck_server[143] [[ 0 =~ 3 ]] +epprd_rg:clcheck_server[147] lssrc -s rpc.mountd +epprd_rg:clcheck_server[147] 1> /dev/null 2> /dev/null +epprd_rg:clcheck_server[161] egrep 'stop|active' +epprd_rg:clcheck_server[161] lssrc -s rpc.mountd +epprd_rg:clcheck_server[161] LC_ALL=C +epprd_rg:clcheck_server[161] check_if_down='' +epprd_rg:clcheck_server[166] [[ -z '' ]] +epprd_rg:clcheck_server[171] sleep 1 +epprd_rg:clcheck_server[172] lssrc -s rpc.mountd +epprd_rg:clcheck_server[172] LC_ALL=C +epprd_rg:clcheck_server[172] egrep 'stop|active' +epprd_rg:clcheck_server[172] check_if_down='' +epprd_rg:clcheck_server[173] [[ -z '' ]] +epprd_rg:clcheck_server[177] return 0 +epprd_rg:cl_export_fs[394] [[ rpc.mountd == nfsd ]] +epprd_rg:cl_export_fs[403] : Start rpc.mountd back up again +epprd_rg:cl_export_fs[405] startsrc -s rpc.mountd 0513-059 The rpc.mountd Subsystem has been started. Subsystem PID is 24641920. +epprd_rg:cl_export_fs[406] rc=0 +epprd_rg:cl_export_fs[407] (( 0 == 0 )) +epprd_rg:cl_export_fs[409] sleep 3 +epprd_rg:cl_export_fs[410] lssrc -s rpc.mountd +epprd_rg:cl_export_fs[410] LC_ALL=C +epprd_rg:cl_export_fs[410] tail +2 +epprd_rg:cl_export_fs[410] subsys_state=' rpc.mountd nfs 24641920 active' +epprd_rg:cl_export_fs[413] (( 0 != 0 )) +epprd_rg:cl_export_fs[413] print -- ' rpc.mountd nfs 24641920 active' +epprd_rg:cl_export_fs[413] grep -qw active +epprd_rg:cl_export_fs[431] : Set the NFSv4 nfsroot parameter. 
This must be set prior to any +epprd_rg:cl_export_fs[432] : NFS exports that use the exname option, and cannot be set to a new +epprd_rg:cl_export_fs[433] : value if any exname exports already exist. This is normally done +epprd_rg:cl_export_fs[434] : at IPL, but rc.nfs is not run at boot when HACMP is installed. +epprd_rg:cl_export_fs[436] [[ -n '' ]] +epprd_rg:cl_export_fs[438] hasrv='' +epprd_rg:cl_export_fs[440] [[ -z '' ]] +epprd_rg:cl_export_fs[442] query=name='STABLE_STORAGE_PATH AND group=epprd_rg' +epprd_rg:cl_export_fs[443] odmget -q name='STABLE_STORAGE_PATH AND group=epprd_rg' HACMPresource +epprd_rg:cl_export_fs[444] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_export_fs[443] STABLE_STORAGE_PATH='' +epprd_rg:cl_export_fs[447] [[ -z '' ]] +epprd_rg:cl_export_fs[449] STABLE_STORAGE_PATH=/var/adm/nfsv4.hacmp/epprd_rg +epprd_rg:cl_export_fs[452] [[ -z '' ]] +epprd_rg:cl_export_fs[454] query=name='STABLE_STORAGE_COOKIE AND group=epprd_rg' +epprd_rg:cl_export_fs[455] odmget -q name='STABLE_STORAGE_COOKIE AND group=epprd_rg' HACMPresource +epprd_rg:cl_export_fs[456] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_export_fs[455] STABLE_STORAGE_COOKIE='' +epprd_rg:cl_export_fs[459] [[ -n epprd_rg ]] +epprd_rg:cl_export_fs[461] odmget -q 'name = SERVICE_LABEL and group = epprd_rg' HACMPresource +epprd_rg:cl_export_fs[462] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:cl_export_fs[461] SERVICE_LABEL=epprd +epprd_rg:cl_export_fs[465] primary epprd +epprd_rg:cl_export_fs[primary:55] echo epprd +epprd_rg:cl_export_fs[465] primary=epprd +epprd_rg:cl_export_fs[466] secondary epprd +epprd_rg:cl_export_fs[secondary:74] [[ -n epprd ]] +epprd_rg:cl_export_fs[secondary:74] shift +epprd_rg:cl_export_fs[secondary:75] echo '' +epprd_rg:cl_export_fs[466] secondary='' +epprd_rg:cl_export_fs[468] nfs_node_state='' +epprd_rg:cl_export_fs[471] : Determine if grace periods are enabled +epprd_rg:cl_export_fs[473] ps -eo args +epprd_rg:cl_export_fs[473] grep -w nfsd +epprd_rg:cl_export_fs[473] grep -qw -- '-gp on' +epprd_rg:cl_export_fs[476] gp=off +epprd_rg:cl_export_fs[480] : We can use an NFSv4 node if grace periods are enabled, we are running a +epprd_rg:cl_export_fs[481] : 64-bit kernel, and the nfs4smctl command exists. +epprd_rg:cl_export_fs[483] [[ off == on ]] +epprd_rg:cl_export_fs[487] rm -f '/var/adm/nfsv4.hacmp/epprd_rg/*' +epprd_rg:cl_export_fs[487] 2> /dev/null +epprd_rg:cl_export_fs[491] : If we have NFSv4 exports, then we need to configure our NFS node so that +epprd_rg:cl_export_fs[492] : we can use stable storage. Note, NFS only supports this functionality in +epprd_rg:cl_export_fs[493] : its 64-bit kernels. +epprd_rg:cl_export_fs[495] [[ -n '' ]] +epprd_rg:cl_export_fs[580] [[ '' == acquiring ]] +epprd_rg:cl_export_fs[585] ALLEXPORTS=All_exports +epprd_rg:cl_export_fs[587] : update resource manager with this action +epprd_rg:cl_export_fs[589] cl_RMupdate resource_acquiring All_exports cl_export_fs 2023-01-28T18:07:06.315044 2023-01-28T18:07:06.319337 +epprd_rg:cl_export_fs[592] : Build a list of all filesystems that need to be exported, irrespective of +epprd_rg:cl_export_fs[593] : the protocol version. Since some filesystems may be exported with multiple +epprd_rg:cl_export_fs[594] : versions, remove any duplicates. 
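[Editor's note] Before building the deduplicated filesystem list that follows, cl_export_fs resolved its NFSv4 state from the ODM. The pattern is one sed-scraped odmget per attribute; a hedged helper capturing it (the rg_attr function name is mine, not the script's; the odmget and sed invocations are verbatim from the trace):

    # Hypothetical helper: fetch one HACMPresource attribute for a group.
    rg_attr () {
        odmget -q "name=$1 AND group=$2" HACMPresource |
            sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p'
    }
    STABLE_STORAGE_PATH=$(rg_attr STABLE_STORAGE_PATH epprd_rg)
    # Default used above when the ODM holds no explicit value:
    [[ -z $STABLE_STORAGE_PATH ]] && STABLE_STORAGE_PATH=/var/adm/nfsv4.hacmp/epprd_rg
    SERVICE_LABEL=$(rg_attr SERVICE_LABEL epprd_rg)    # epprd in this run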
+epprd_rg:cl_export_fs[596] echo /board_org /sapmnt/EPP +epprd_rg:cl_export_fs[596] tr ' ' '\n' +epprd_rg:cl_export_fs[596] sort -u +epprd_rg:cl_export_fs[596] FILESYSTEM_LIST=$'/board_org\n/sapmnt/EPP' +epprd_rg:cl_export_fs[599] : Loop through all of the filesystems we need to export ... +epprd_rg:cl_export_fs[603] v3='' +epprd_rg:cl_export_fs[604] v4='' +epprd_rg:cl_export_fs[605] root=epprd:epprda:epprds +epprd_rg:cl_export_fs[606] new_options='' +epprd_rg:cl_export_fs[607] export_file_line='' +epprd_rg:cl_export_fs[608] USING_EXPORTS_FILE=0 +epprd_rg:cl_export_fs[609] export_lines='' +epprd_rg:cl_export_fs[610] otheroption='' +epprd_rg:cl_export_fs[613] : Get the export line from exportfs for this export +epprd_rg:cl_export_fs[615] exportfs +epprd_rg:cl_export_fs[615] grep '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_export_fs[615] export_line='' +epprd_rg:cl_export_fs[617] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[620] : Get the export file for the filesystem from the exports file. +epprd_rg:cl_export_fs[621] : Only look at the part of the line preceding comments. +epprd_rg:cl_export_fs[624] getline_exports /board_org +epprd_rg:cl_export_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_export_fs[getline_exports:45] line='' +epprd_rg:cl_export_fs[getline_exports:45] typeset line +epprd_rg:cl_export_fs[getline_exports:46] flag=0 +epprd_rg:cl_export_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_export_fs[getline_exports:47] fs=/board_org +epprd_rg:cl_export_fs[getline_exports:49] [[ -z /board_org ]] +epprd_rg:cl_export_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_export_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_export_fs[getline_exports:69] (( 1 == 0 )) +epprd_rg:cl_export_fs[getline_exports:74] [[ 0 == 1 ]] +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:89] return 0 +epprd_rg:cl_export_fs[625] export_file_line='' +epprd_rg:cl_export_fs[628] : If the administrator provides an entry for the filesystem in the +epprd_rg:cl_export_fs[629] : exports file then ignore the root option that was passed in on the +epprd_rg:cl_export_fs[630] : command line. +epprd_rg:cl_export_fs[632] [[ -n '' ]] +epprd_rg:cl_export_fs[636] : If the filesystem currently is not exported, then get the options from +epprd_rg:cl_export_fs[637] : the exports file. We will merge these options with options specified +epprd_rg:cl_export_fs[638] : through resource group attributes to produce the actual options we will +epprd_rg:cl_export_fs[639] : provide to exportfs. 
+epprd_rg:cl_export_fs[641] [[ -z '' ]] +epprd_rg:cl_export_fs[643] export_line='' +epprd_rg:cl_export_fs[644] USING_EXPORTS_FILE=1 +epprd_rg:cl_export_fs[648] : In case of multiple exports for same filesystem +epprd_rg:cl_export_fs[649] : Process them line by line +epprd_rg:cl_export_fs[651] set +u +epprd_rg:cl_export_fs[652] oldifs=$' \t\n' +epprd_rg:cl_export_fs[653] IFS=$'\n' +epprd_rg:cl_export_fs[653] export_lines=( ) +epprd_rg:cl_export_fs[654] IFS=$' \t\n' +epprd_rg:cl_export_fs[656] [ -n '' ] +epprd_rg:cl_export_fs[733] set -u +epprd_rg:cl_export_fs[736] : At this point, v3 and v4 are set based on what is actually exported +epprd_rg:cl_export_fs[737] : or what is configured to be exported in the exports file. +epprd_rg:cl_export_fs[740] (( USING_EXPORTS_FILE )) +epprd_rg:cl_export_fs[742] v3='' +epprd_rg:cl_export_fs[743] v4='' +epprd_rg:cl_export_fs[747] : At this point, v3 and v4 are set based on what is actually exported. +epprd_rg:cl_export_fs[748] : Now add additional versions if the resource group has them configured. +epprd_rg:cl_export_fs[752] [[ /board_org == /board_org ]] +epprd_rg:cl_export_fs[752] v3=:2:3 +epprd_rg:cl_export_fs[752] break +epprd_rg:cl_export_fs[761] : Versions 2 and 3 are the default versions. Some versions of AIX do +epprd_rg:cl_export_fs[762] : not support the vers export option, so only use the option if we are +epprd_rg:cl_export_fs[763] : exporting a non-default value such as 4 +epprd_rg:cl_export_fs[765] [[ -n '' ]] +epprd_rg:cl_export_fs[779] [[ -n epprd:epprda:epprds ]] +epprd_rg:cl_export_fs[782] : If we have root priveliged clients, +epprd_rg:cl_export_fs[783] : then add them to the option list. +epprd_rg:cl_export_fs[785] new_options=,root=epprd:epprda:epprds +epprd_rg:cl_export_fs[788] [[ -n '' ]] +epprd_rg:cl_export_fs[798] : Strip off the leading comma +epprd_rg:cl_export_fs[800] cut -d, -f2- +epprd_rg:cl_export_fs[800] echo ,root=epprd:epprda:epprds +epprd_rg:cl_export_fs[800] new_options=root=epprd:epprda:epprds +epprd_rg:cl_export_fs[802] [[ -z root=epprd:epprda:epprds ]] +epprd_rg:cl_export_fs[811] : Exporting filesystem /board_org with options root=epprd:epprda:epprds +epprd_rg:cl_export_fs[813] exportfs -i -o root=epprd:epprda:epprds /board_org +epprd_rg:cl_export_fs[814] RC=0 +epprd_rg:cl_export_fs[817] (( 0 != 0 )) +epprd_rg:cl_export_fs[603] v3='' +epprd_rg:cl_export_fs[604] v4='' +epprd_rg:cl_export_fs[605] root=epprd:epprda:epprds +epprd_rg:cl_export_fs[606] new_options='' +epprd_rg:cl_export_fs[607] export_file_line='' +epprd_rg:cl_export_fs[608] USING_EXPORTS_FILE=0 +epprd_rg:cl_export_fs[609] export_lines='' +epprd_rg:cl_export_fs[610] otheroption='' +epprd_rg:cl_export_fs[613] : Get the export line from exportfs for this export +epprd_rg:cl_export_fs[615] exportfs +epprd_rg:cl_export_fs[615] grep '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_export_fs[615] export_line='' +epprd_rg:cl_export_fs[617] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[620] : Get the export file for the filesystem from the exports file. +epprd_rg:cl_export_fs[621] : Only look at the part of the line preceding comments. 
+epprd_rg:cl_export_fs[624] getline_exports /sapmnt/EPP +epprd_rg:cl_export_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_export_fs[getline_exports:45] line='' +epprd_rg:cl_export_fs[getline_exports:45] typeset line +epprd_rg:cl_export_fs[getline_exports:46] flag=0 +epprd_rg:cl_export_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_export_fs[getline_exports:47] fs=/sapmnt/EPP +epprd_rg:cl_export_fs[getline_exports:49] [[ -z /sapmnt/EPP ]] +epprd_rg:cl_export_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_export_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:68] grep -q '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_export_fs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_export_fs[getline_exports:71] flag=1 +epprd_rg:cl_export_fs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_export_fs[getline_exports:76] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:76] grep -w '\\' +epprd_rg:cl_export_fs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_export_fs[getline_exports:81] cl_exports_data=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:82] break +epprd_rg:cl_export_fs[getline_exports:89] return 0 +epprd_rg:cl_export_fs[625] export_file_line=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[628] : If the administrator provides an entry for the filesystem in the +epprd_rg:cl_export_fs[629] : exports file then ignore the root option that was passed in on the +epprd_rg:cl_export_fs[630] : command line. +epprd_rg:cl_export_fs[632] [[ -n ' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' ]] +epprd_rg:cl_export_fs[632] root='' +epprd_rg:cl_export_fs[636] : If the filesystem currently is not exported, then get the options from +epprd_rg:cl_export_fs[637] : the exports file. We will merge these options with options specified +epprd_rg:cl_export_fs[638] : through resource group attributes to produce the actual options we will +epprd_rg:cl_export_fs[639] : provide to exportfs. 
+epprd_rg:cl_export_fs[641] [[ -z '' ]] +epprd_rg:cl_export_fs[643] export_line=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[644] USING_EXPORTS_FILE=1 +epprd_rg:cl_export_fs[648] : In case of multiple exports for same filesystem +epprd_rg:cl_export_fs[649] : Process them line by line +epprd_rg:cl_export_fs[651] set +u +epprd_rg:cl_export_fs[652] oldifs=$' \t\n' +epprd_rg:cl_export_fs[653] IFS=$'\n' +epprd_rg:cl_export_fs[653] export_lines=( ' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' ) +epprd_rg:cl_export_fs[654] IFS=$' \t\n' +epprd_rg:cl_export_fs[656] [ -n ' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' ] +epprd_rg:cl_export_fs[661] : The line is of the format: filesystem -option1,option2,... +epprd_rg:cl_export_fs[662] : This will give 'option1 option2 ...' +epprd_rg:cl_export_fs[664] echo /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[664] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_export_fs[664] cut -d- -f2- +epprd_rg:cl_export_fs[665] tr , ' ' +epprd_rg:cl_export_fs[664] old_options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap root=epprdap ' +epprd_rg:cl_export_fs[668] : Each option can be of the format name=value, or just name. +epprd_rg:cl_export_fs[669] : We care about the hasrv, vers, and root options. +epprd_rg:cl_export_fs[673] : Loop through all of the export options for this export. +epprd_rg:cl_export_fs[700] [[ sec=sys:krb5p:krb5i:krb5:dh == -* ]] +epprd_rg:cl_export_fs[708] [[ sec=sys:krb5p:krb5i:krb5:dh == /sapmnt/EPP ]] +epprd_rg:cl_export_fs[716] echo sec=sys:krb5p:krb5i:krb5:dh +epprd_rg:cl_export_fs[716] cut -d= -f 1 +epprd_rg:cl_export_fs[716] otheroption=sec +epprd_rg:cl_export_fs[717] [[ '' == *sec* ]] +epprd_rg:cl_export_fs[724] : Merge in all remaining options. +epprd_rg:cl_export_fs[726] new_options=,sec=sys:krb5p:krb5i:krb5:dh +epprd_rg:cl_export_fs[700] [[ rw == -* ]] +epprd_rg:cl_export_fs[708] [[ rw == /sapmnt/EPP ]] +epprd_rg:cl_export_fs[716] echo rw +epprd_rg:cl_export_fs[716] cut -d= -f 1 +epprd_rg:cl_export_fs[716] otheroption=rw +epprd_rg:cl_export_fs[717] [[ ,sec=sys:krb5p:krb5i:krb5:dh == *rw* ]] +epprd_rg:cl_export_fs[724] : Merge in all remaining options. +epprd_rg:cl_export_fs[726] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw +epprd_rg:cl_export_fs[700] [[ access=epprdap == -* ]] +epprd_rg:cl_export_fs[708] [[ access=epprdap == /sapmnt/EPP ]] +epprd_rg:cl_export_fs[716] echo access=epprdap +epprd_rg:cl_export_fs[716] cut -d= -f 1 +epprd_rg:cl_export_fs[716] otheroption=access +epprd_rg:cl_export_fs[717] [[ ,sec=sys:krb5p:krb5i:krb5:dh,rw == *access* ]] +epprd_rg:cl_export_fs[724] : Merge in all remaining options. +epprd_rg:cl_export_fs[726] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap +epprd_rg:cl_export_fs[695] : Merge in the root option. +epprd_rg:cl_export_fs[697] echo root=epprdap +epprd_rg:cl_export_fs[697] cut -d= -f2- +epprd_rg:cl_export_fs[697] root=epprdap +epprd_rg:cl_export_fs[733] set -u +epprd_rg:cl_export_fs[736] : At this point, v3 and v4 are set based on what is actually exported +epprd_rg:cl_export_fs[737] : or what is configured to be exported in the exports file. +epprd_rg:cl_export_fs[740] (( USING_EXPORTS_FILE )) +epprd_rg:cl_export_fs[742] v3='' +epprd_rg:cl_export_fs[743] v4='' +epprd_rg:cl_export_fs[747] : At this point, v3 and v4 are set based on what is actually exported. 
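[Editor's note] The option handling just traced for /sapmnt/EPP is the general path: the line from /usr/es/sbin/cluster/etc/exports is re-parsed, root= is captured separately (the command-line root= having been discarded at [632] because the admin supplied a file entry), every other option is appended verbatim, and the assembled string drives exportfs. Hedged sketch of that merge, with the vers= and hasrv= special cases omitted; /board_org above took the degenerate path, where no file entry existed and only the command-line root= survived:

    new_options='' ; root=''
    for opt in $(echo "$export_line" |
                 awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' |
                 cut -d- -f2- | tr ',' ' ')
    do
        case $opt in
            root=*) root=${opt#root=} ;;               # file's root= applies
            *)      new_options="$new_options,$opt" ;;
        esac
    done
    [[ -n $root ]] && new_options="$new_options,root=$root"
    new_options=$(echo "$new_options" | cut -d, -f2-)  # strip leading comma
    exportfs -i -o "$new_options" $fs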
+epprd_rg:cl_export_fs[748] : Now add additional versions if the resource group has them configured. +epprd_rg:cl_export_fs[752] [[ /sapmnt/EPP == /board_org ]] +epprd_rg:cl_export_fs[752] [[ /sapmnt/EPP == /sapmnt/EPP ]] +epprd_rg:cl_export_fs[752] v3=:2:3 +epprd_rg:cl_export_fs[752] break +epprd_rg:cl_export_fs[761] : Versions 2 and 3 are the default versions. Some versions of AIX do +epprd_rg:cl_export_fs[762] : not support the vers export option, so only use the option if we are +epprd_rg:cl_export_fs[763] : exporting a non-default value such as 4 +epprd_rg:cl_export_fs[765] [[ -n '' ]] +epprd_rg:cl_export_fs[779] [[ -n epprdap ]] +epprd_rg:cl_export_fs[782] : If we have root priveliged clients, +epprd_rg:cl_export_fs[783] : then add them to the option list. +epprd_rg:cl_export_fs[785] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[788] [[ -n '' ]] +epprd_rg:cl_export_fs[798] : Strip off the leading comma +epprd_rg:cl_export_fs[800] echo ,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[800] cut -d, -f2- +epprd_rg:cl_export_fs[800] new_options=sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[802] [[ -z sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap ]] +epprd_rg:cl_export_fs[811] : Exporting filesystem /sapmnt/EPP with options sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[813] exportfs -i -o sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap /sapmnt/EPP +epprd_rg:cl_export_fs[814] RC=0 +epprd_rg:cl_export_fs[817] (( 0 != 0 )) +epprd_rg:cl_export_fs[834] ALLNOERREXPORT=All_nonerror_exports +epprd_rg:cl_export_fs[836] : update resource manager with results +epprd_rg:cl_export_fs[838] cl_RMupdate resource_up All_nonerror_exports cl_export_fs 2023-01-28T18:07:06.436994 2023-01-28T18:07:06.441296 +epprd_rg:cl_export_fs[840] exit 0 +epprd_rg:process_resources(13.846)[export_filesystems:1662] RC=0 +epprd_rg:process_resources(13.846)[export_filesystems:1663] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources(13.846)[export_filesystems:1669] (( 0 != 0 )) +epprd_rg:process_resources(13.846)[export_filesystems:1675] return 0 +epprd_rg:process_resources(13.846)[3324] true +epprd_rg:process_resources(13.846)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(13.846)[3328] set -a +epprd_rg:process_resources(13.846)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:07:06.454973 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(13.860)[3329] eval JOB_TYPE=TELINIT +epprd_rg:process_resources(13.860)[1] JOB_TYPE=TELINIT +epprd_rg:process_resources(13.860)[3330] RC=0 +epprd_rg:process_resources(13.860)[3331] set +a +epprd_rg:process_resources(13.860)[3333] (( 0 != 0 )) +epprd_rg:process_resources(13.860)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(13.860)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(13.860)[3343] export GROUPNAME +epprd_rg:process_resources(13.860)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(13.860)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(13.860)[3360] [[ TELINIT == RELEASE ]] +epprd_rg:process_resources(13.860)[3360] [[ TELINIT == ONLINE ]] +epprd_rg:process_resources(13.860)[3435] cl_telinit +epprd_rg:cl_telinit[178] version=%I% 
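[Editor's note] By this point the shape of process_resources' main loop is visible (the cl_telinit trace it just dispatched continues below): each clRGPA call emits shell assignments that are eval'ed under set -a, so every variable is exported to the child scripts, and JOB_TYPE selects the handler. A hedged outline only; the real dispatcher handles many more job types than the three seen in this section, and the NONE terminator and whitespace trimming are my assumptions:

    while true ; do
        set -a
        eval $(clRGPA)                 # e.g. JOB_TYPE=TELINIT, MOUNT_FILESYSTEMS ...
        set +a
        [[ ${JOB_TYPE:-NONE} == NONE ]] && break   # assumed terminator, not reached here
        export GROUPNAME=${RESOURCE_GROUPS%% *}    # 'epprd_rg ' trimmed to epprd_rg
        case $JOB_TYPE in
            EXPORT_FILESYSTEMS) [[ $ACTION == ACQUIRE ]] && export_filesystems ;;
            TELINIT)            cl_telinit ;;
            MOUNT_FILESYSTEMS)  [[ $ACTION == ACQUIRE ]] && mount_nfs_filesystems MOUNT ;;
        esac
    done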
+epprd_rg:cl_telinit[182] TELINIT_FILE=/usr/es/sbin/cluster/.telinit +epprd_rg:cl_telinit[183] USE_TELINIT_FILE=/usr/es/sbin/cluster/.use_telinit +epprd_rg:cl_telinit[185] [[ -f /usr/es/sbin/cluster/.use_telinit ]] +epprd_rg:cl_telinit[189] USE_TELINIT=0 +epprd_rg:cl_telinit[198] [[ '' == -boot ]] +epprd_rg:cl_telinit[236] cl_lsitab clinit +epprd_rg:cl_telinit[236] 1> /dev/null 2>& 1 +epprd_rg:cl_telinit[239] : telinit a disabled +epprd_rg:cl_telinit[241] return 0 +epprd_rg:process_resources(13.881)[3324] true +epprd_rg:process_resources(13.881)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(13.881)[3328] set -a +epprd_rg:process_resources(13.881)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:07:06.489167 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(13.894)[3329] eval JOB_TYPE=MOUNT_FILESYSTEMS ACTION=ACQUIRE FILE_SYSTEMS='"/board;/board_org"' RESOURCE_GROUPS='"epprd_rg' '"' NFS_NETWORKS='""' NFS_HOSTS='""' IP_LABELS='"epprd"' +epprd_rg:process_resources(13.894)[1] JOB_TYPE=MOUNT_FILESYSTEMS +epprd_rg:process_resources(13.894)[1] ACTION=ACQUIRE +epprd_rg:process_resources(13.894)[1] FILE_SYSTEMS='/board;/board_org' +epprd_rg:process_resources(13.894)[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources(13.894)[1] NFS_NETWORKS='' +epprd_rg:process_resources(13.894)[1] NFS_HOSTS='' +epprd_rg:process_resources(13.894)[1] IP_LABELS=epprd +epprd_rg:process_resources(13.894)[3330] RC=0 +epprd_rg:process_resources(13.894)[3331] set +a +epprd_rg:process_resources(13.894)[3333] (( 0 != 0 )) +epprd_rg:process_resources(13.894)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(13.894)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(13.894)[3343] export GROUPNAME +epprd_rg:process_resources(13.894)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(13.894)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(13.894)[3360] [[ MOUNT_FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources(13.894)[3360] [[ MOUNT_FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources(13.894)[3612] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources(13.894)[3614] mount_nfs_filesystems MOUNT +epprd_rg:process_resources(13.894)[mount_nfs_filesystems:1437] PS4_FUNC=mount_nfs_filesystems +epprd_rg:process_resources(13.894)[mount_nfs_filesystems:1437] typeset PS4_FUNC +epprd_rg:process_resources(13.894)[mount_nfs_filesystems:1438] [[ high == high ]] +epprd_rg:process_resources(13.894)[mount_nfs_filesystems:1438] set -x +epprd_rg:process_resources(13.894)[mount_nfs_filesystems:1440] post_event_member=FALSE +epprd_rg:process_resources(13.894)[mount_nfs_filesystems:1444] [[ epprda == epprda ]] +epprd_rg:process_resources(13.894)[mount_nfs_filesystems:1446] post_event_member=TRUE +epprd_rg:process_resources(13.894)[mount_nfs_filesystems:1447] break +epprd_rg:process_resources(13.894)[mount_nfs_filesystems:1452] : This node will not be in the resource group so do not mount filesystems. 
+epprd_rg:process_resources(13.894)[mount_nfs_filesystems:1454] [[ TRUE == FALSE ]] +epprd_rg:process_resources(13.894)[mount_nfs_filesystems:1459] STAT=0 +epprd_rg:process_resources(13.894)[mount_nfs_filesystems:1463] export GROUPNAME +epprd_rg:process_resources(13.896)[mount_nfs_filesystems:1465] get_list_head '/board;/board_org' +epprd_rg:process_resources(13.896)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(13.896)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(13.896)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(13.896)[get_list_head:60] set -x +epprd_rg:process_resources(13.897)[get_list_head:61] echo '/board;/board_org' +epprd_rg:process_resources(13.899)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(13.899)[get_list_head:61] IFS=: +epprd_rg:process_resources(13.899)[get_list_head:62] echo '/board;/board_org' +epprd_rg:process_resources(13.901)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(13.898)[mount_nfs_filesystems:1465] read LIST_OF_FILE_SYSTEMS_FOR_RG +epprd_rg:process_resources(13.906)[mount_nfs_filesystems:1466] get_list_tail '/board;/board_org' +epprd_rg:process_resources(13.906)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(13.906)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(13.906)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(13.906)[get_list_tail:68] set -x +epprd_rg:process_resources(13.907)[get_list_tail:69] echo '/board;/board_org' +epprd_rg:process_resources(13.909)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(13.909)[get_list_tail:69] IFS=: +epprd_rg:process_resources(13.909)[get_list_tail:70] echo +epprd_rg:process_resources(13.908)[mount_nfs_filesystems:1466] read FILE_SYSTEMS +epprd_rg:process_resources(13.912)[mount_nfs_filesystems:1468] get_list_head +epprd_rg:process_resources(13.912)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(13.912)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(13.912)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(13.913)[get_list_head:60] set -x +epprd_rg:process_resources(13.913)[get_list_head:61] echo +epprd_rg:process_resources(13.916)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(13.916)[get_list_head:61] IFS=: +epprd_rg:process_resources(13.916)[get_list_head:62] echo +epprd_rg:process_resources(13.918)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(13.915)[mount_nfs_filesystems:1468] read NFS_HOST +epprd_rg:process_resources(13.923)[mount_nfs_filesystems:1469] get_list_tail +epprd_rg:process_resources(13.923)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(13.923)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(13.923)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(13.923)[get_list_tail:68] set -x +epprd_rg:process_resources(13.924)[get_list_tail:69] echo +epprd_rg:process_resources(13.926)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(13.926)[get_list_tail:69] IFS=: +epprd_rg:process_resources(13.926)[get_list_tail:70] echo +epprd_rg:process_resources(13.925)[mount_nfs_filesystems:1469] read NFS_HOSTS +epprd_rg:process_resources(13.929)[mount_nfs_filesystems:1471] get_list_head +epprd_rg:process_resources(13.929)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(13.929)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(13.929)[get_list_head:60] [[ high == high 
]] +epprd_rg:process_resources(13.929)[get_list_head:60] set -x +epprd_rg:process_resources(13.930)[get_list_head:61] echo +epprd_rg:process_resources(13.932)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(13.932)[get_list_head:61] IFS=: +epprd_rg:process_resources(13.933)[get_list_head:62] echo +epprd_rg:process_resources(13.934)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(13.931)[mount_nfs_filesystems:1471] read NFS_NETWORK +epprd_rg:process_resources(13.939)[mount_nfs_filesystems:1472] get_list_tail +epprd_rg:process_resources(13.939)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(13.939)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(13.939)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(13.939)[get_list_tail:68] set -x +epprd_rg:process_resources(13.940)[get_list_tail:69] echo +epprd_rg:process_resources(13.942)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(13.942)[get_list_tail:69] IFS=: +epprd_rg:process_resources(13.942)[get_list_tail:70] echo +epprd_rg:process_resources(13.942)[mount_nfs_filesystems:1472] read NFS_NETWORKS +epprd_rg:process_resources(13.945)[mount_nfs_filesystems:1474] get_list_head epprd +epprd_rg:process_resources(13.945)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(13.945)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(13.945)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(13.945)[get_list_head:60] set -x +epprd_rg:process_resources(13.946)[get_list_head:61] echo epprd +epprd_rg:process_resources(13.948)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(13.948)[get_list_head:61] IFS=: +epprd_rg:process_resources(13.949)[get_list_head:62] echo epprd +epprd_rg:process_resources(13.950)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(13.950)[mount_nfs_filesystems:1474] read LIST_OF_IP_LABELS_FOR_RG +epprd_rg:process_resources(13.955)[mount_nfs_filesystems:1475] get_list_tail epprd +epprd_rg:process_resources(13.956)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(13.956)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(13.956)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(13.956)[get_list_tail:68] set -x +epprd_rg:process_resources(13.957)[get_list_tail:69] echo epprd +epprd_rg:process_resources(13.959)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(13.959)[get_list_tail:69] IFS=: +epprd_rg:process_resources(13.959)[get_list_tail:70] echo +epprd_rg:process_resources(13.958)[mount_nfs_filesystems:1475] read IP_LABELS +epprd_rg:process_resources(13.960)[mount_nfs_filesystems:1477] MOUNT_FILESYSTEM='/board;/board_org' +epprd_rg:process_resources(13.960)[mount_nfs_filesystems:1478] NFSMOUNT_LABEL=epprd +epprd_rg:process_resources(13.960)[mount_nfs_filesystems:1481] : Do the required NFS_mounts. 
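The get_list_head/get_list_tail pair traced above is how process_resources walks the per-resource-group lists handed back by clRGPA: elements for different resource groups are separated by colons, and members within one element are separated by commas (turned into spaces on output). A minimal standalone sketch of that convention, with the function bodies reconstructed from the trace rather than copied from the shipped script (the '/fsA,/fsB' element is a made-up second entry for illustration):

    #!/bin/ksh
    # Head element of a colon-separated list; commas become spaces.
    get_list_head()
    {
        echo "$*" | IFS=: read listhead listtail
        echo "$listhead" | tr ',' ' '
    }

    # Remainder of the list after the first colon-separated element.
    get_list_tail()
    {
        echo "$*" | IFS=: read listhead listtail
        echo "$listtail"
    }

    get_list_head '/board;/board_org:/fsA,/fsB'   # -> /board;/board_org
    get_list_tail '/board;/board_org:/fsA,/fsB'   # -> /fsA,/fsB

With a single resource group, as here, every tail read (FILE_SYSTEMS, NFS_HOSTS, NFS_NETWORKS) comes back empty, which is exactly what the echoes above show.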
+epprd_rg:process_resources(13.960)[mount_nfs_filesystems:1484] NW_NFSMOUNT_LABEL='' +epprd_rg:process_resources(13.960)[mount_nfs_filesystems:1486] [[ -z '' ]] +epprd_rg:process_resources(13.960)[mount_nfs_filesystems:1488] NFS_HOST=epprda +epprd_rg:process_resources(13.960)[mount_nfs_filesystems:1491] NFSHOST='' +epprd_rg:process_resources(13.960)[mount_nfs_filesystems:1492] [[ -n epprda ]] +epprd_rg:process_resources(13.960)[mount_nfs_filesystems:1494] [[ -n '' ]] +epprd_rg:process_resources(13.960)[mount_nfs_filesystems:1516] [[ MOUNT == REMOUNT ]] +epprd_rg:process_resources(13.960)[mount_nfs_filesystems:1526] ping epprd 1024 1 +epprd_rg:process_resources(13.960)[mount_nfs_filesystems:1526] 1> /dev/null +epprd_rg:process_resources(13.964)[mount_nfs_filesystems:1528] NFSHOST=epprd +epprd_rg:process_resources(13.964)[mount_nfs_filesystems:1529] break +epprd_rg:process_resources(13.964)[mount_nfs_filesystems:1533] [[ -n epprd ]] +epprd_rg:process_resources(13.964)[mount_nfs_filesystems:1536] : activate_nfs will not wait for the mounts to complete +epprd_rg:process_resources(13.964)[mount_nfs_filesystems:1538] cl_activate_nfs 1 epprd '/board;/board_org' +epprd_rg:cl_activate_nfs[68] [[ high == high ]] +epprd_rg:cl_activate_nfs[68] version='1.19.4.2 $Source$' +epprd_rg:cl_activate_nfs[70] . /usr/es/sbin/cluster/events/utils/cl_nfs_utils +epprd_rg:cl_activate_nfs[98] PROGNAME=cl_activate_nfs +epprd_rg:cl_activate_nfs[99] [[ high == high ]] +epprd_rg:cl_activate_nfs[101] set -x +epprd_rg:cl_activate_nfs[102] version=%I +epprd_rg:cl_activate_nfs[105] cl_exports_data='' +epprd_rg:cl_activate_nfs[105] typeset cl_exports_data +epprd_rg:cl_activate_nfs[106] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[72] set -u +epprd_rg:cl_activate_nfs[242] grep -w ^MOUNT_WLMCNTRL_SELFMANAGE /etc/environment +epprd_rg:cl_activate_nfs[242] export eval +epprd_rg:cl_activate_nfs[244] (( 3 < 3 )) +epprd_rg:cl_activate_nfs[253] ATTEMPTS=1 +epprd_rg:cl_activate_nfs[253] typeset -li ATTEMPTS +epprd_rg:cl_activate_nfs[254] HOST=epprd +epprd_rg:cl_activate_nfs[256] shift 2 +epprd_rg:cl_activate_nfs[261] FILELIST='/board;/board_org' +epprd_rg:cl_activate_nfs[266] print '/board;/board_org' +epprd_rg:cl_activate_nfs[266] grep -q '\;/' +epprd_rg:cl_activate_nfs[271] CROSSMOUNTS=TRUE +epprd_rg:cl_activate_nfs[272] print '/board;/board_org' +epprd_rg:cl_activate_nfs[272] tr ' ' '\n' +epprd_rg:cl_activate_nfs[272] /bin/sort -k 1,1 '-t;' +epprd_rg:cl_activate_nfs[272] MOUNTLIST='/board;/board_org' +epprd_rg:cl_activate_nfs[281] ALLNFS=All_nfs_mounts +epprd_rg:cl_activate_nfs[282] cl_RMupdate resource_acquiring All_nfs_mounts cl_activate_nfs 2023-01-28T18:07:06.611311 2023-01-28T18:07:06.615717 +epprd_rg:cl_activate_nfs[288] odmget -q name='RECOVERY_METHOD AND group=epprd_rg' HACMPresource +epprd_rg:cl_activate_nfs[289] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_activate_nfs[288] METHOD=sequential +epprd_rg:cl_activate_nfs[291] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_activate_nfs[291] odmget -q name='EXPORT_FILESYSTEM AND group=epprd_rg' HACMPresource +epprd_rg:cl_activate_nfs[291] EXPORT_FILESYSTEM=$'/board_org\n/sapmnt/EPP' +epprd_rg:cl_activate_nfs[293] odmget -q name='EXPORT_FILESYSTEM_V4 AND group=epprd_rg' HACMPresource +epprd_rg:cl_activate_nfs[293] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_activate_nfs[293] EXPORT_FILESYSTEM_V4='' +epprd_rg:cl_activate_nfs[302] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[304] [[ -z '' ]] 
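At this point mount_nfs_filesystems has picked a server: it walks the candidate IP labels for the group and keeps the first one that answers a ping, then hands the 'mountpoint;filesystem' cross-mount pair to cl_activate_nfs. A rough sketch of both steps, assuming the AIX ping argument order (host, packet size, count) seen in the trace:

    #!/bin/ksh
    # Keep the first NFS server label that answers one 1024-byte ping.
    pick_nfs_host()
    {
        typeset host
        for host in "$@"
        do
            if ping $host 1024 1 > /dev/null
            then
                echo $host
                return 0
            fi
        done
        return 1    # nothing answered; the mount will be reported as failed
    }

    NFSHOST=$(pick_nfs_host epprd)   # -> epprd
    # The ';' pair notation: local mount point, then server-side filesystem.
    pair='/board;/board_org'
    mountpoint=${pair%%;*}           # /board
    filesystem=${pair#*;}            # /board_org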
+epprd_rg:cl_activate_nfs[305] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_activate_nfs[308] VERSION_SOURCE=FILES +epprd_rg:cl_activate_nfs[320] [[ FILES == FILES ]] +epprd_rg:cl_activate_nfs[322] export_v3='' +epprd_rg:cl_activate_nfs[323] export_v4='' +epprd_rg:cl_activate_nfs[330] getline_exports /board_org +epprd_rg:cl_activate_nfs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_activate_nfs[getline_exports:45] line='' +epprd_rg:cl_activate_nfs[getline_exports:45] typeset line +epprd_rg:cl_activate_nfs[getline_exports:46] flag=0 +epprd_rg:cl_activate_nfs[getline_exports:46] typeset -i flag +epprd_rg:cl_activate_nfs[getline_exports:47] fs=/board_org +epprd_rg:cl_activate_nfs[getline_exports:49] [[ -z /board_org ]] +epprd_rg:cl_activate_nfs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_activate_nfs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[getline_exports:56] read -r line +epprd_rg:cl_activate_nfs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_activate_nfs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_activate_nfs[getline_exports:69] (( 1 == 0 )) +epprd_rg:cl_activate_nfs[getline_exports:74] [[ 0 == 1 ]] +epprd_rg:cl_activate_nfs[getline_exports:56] read -r line +epprd_rg:cl_activate_nfs[getline_exports:89] return 0 +epprd_rg:cl_activate_nfs[331] export_line='' +epprd_rg:cl_activate_nfs[336] echo +epprd_rg:cl_activate_nfs[336] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_activate_nfs[337] cut -d- -f2- +epprd_rg:cl_activate_nfs[337] tr , ' ' +epprd_rg:cl_activate_nfs[336] options='' +epprd_rg:cl_activate_nfs[343] vers_missing=1 +epprd_rg:cl_activate_nfs[366] (( vers_missing )) +epprd_rg:cl_activate_nfs[366] export_v3=' /board_org' +epprd_rg:cl_activate_nfs[330] getline_exports /sapmnt/EPP +epprd_rg:cl_activate_nfs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_activate_nfs[getline_exports:45] line='' +epprd_rg:cl_activate_nfs[getline_exports:45] typeset line +epprd_rg:cl_activate_nfs[getline_exports:46] flag=0 +epprd_rg:cl_activate_nfs[getline_exports:46] typeset -i flag +epprd_rg:cl_activate_nfs[getline_exports:47] fs=/sapmnt/EPP +epprd_rg:cl_activate_nfs[getline_exports:49] [[ -z /sapmnt/EPP ]] +epprd_rg:cl_activate_nfs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_activate_nfs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[getline_exports:56] read -r line +epprd_rg:cl_activate_nfs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_activate_nfs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:68] grep -q 
'^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_activate_nfs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_activate_nfs[getline_exports:71] flag=1 +epprd_rg:cl_activate_nfs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_activate_nfs[getline_exports:76] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:76] grep -w '\\' +epprd_rg:cl_activate_nfs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_activate_nfs[getline_exports:81] cl_exports_data=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:82] break +epprd_rg:cl_activate_nfs[getline_exports:89] return 0 +epprd_rg:cl_activate_nfs[331] export_line=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[336] echo /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_activate_nfs[336] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_activate_nfs[337] cut -d- -f2- +epprd_rg:cl_activate_nfs[337] tr , ' ' +epprd_rg:cl_activate_nfs[336] options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap root=epprdap ' +epprd_rg:cl_activate_nfs[343] vers_missing=1 +epprd_rg:cl_activate_nfs[366] (( vers_missing )) +epprd_rg:cl_activate_nfs[366] export_v3=' /board_org /sapmnt/EPP' +epprd_rg:cl_activate_nfs[369] EXPORT_FILESYSTEM=' /board_org /sapmnt/EPP' +epprd_rg:cl_activate_nfs[370] EXPORT_FILESYSTEM_V4='' +epprd_rg:cl_activate_nfs[377] [[ -x /usr/sbin/nfsrgyd ]] +epprd_rg:cl_activate_nfs[378] [[ -n '' ]] +epprd_rg:cl_activate_nfs[379] grep -q vers=4 /etc/filesystems +epprd_rg:cl_activate_nfs[394] [[ TRUE == TRUE ]] +epprd_rg:cl_activate_nfs[411] filesystem=/board_org +epprd_rg:cl_activate_nfs[412] mountpoint=/board +epprd_rg:cl_activate_nfs:/board;/board_org[429] PS4_LOOP='/board;/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[430] [[ sequential == sequential ]] +epprd_rg:cl_activate_nfs:/board;/board_org[432] nfs_mount 1 epprd /board_org /board +epprd_rg:cl_activate_nfs(0.115):/board;/board_org[nfs_mount:99] (( 4 != 4 )) +epprd_rg:cl_activate_nfs(0.115):/board;/board_org[nfs_mount:108] LIMIT=1 +epprd_rg:cl_activate_nfs(0.115):/board;/board_org[nfs_mount:108] typeset -li LIMIT +epprd_rg:cl_activate_nfs(0.115):/board;/board_org[nfs_mount:109] HOST=epprd +epprd_rg:cl_activate_nfs(0.115):/board;/board_org[nfs_mount:110] FileSystem=/board_org +epprd_rg:cl_activate_nfs(0.115):/board;/board_org[nfs_mount:111] MountPoint=/board +epprd_rg:cl_activate_nfs(0.116):/board;/board_org[nfs_mount:116] mount +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:119] [[ mounted == /board ]] +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:119] [[ --------------- == /board ]] +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.118):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] 
+epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ procfs == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ ahafs == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ /sapcd == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] 
+epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:126] vers='' +epprd_rg:cl_activate_nfs(0.119):/board;/board_org[nfs_mount:127] [[ FILES == ODM ]] +epprd_rg:cl_activate_nfs(0.121):/board;/board_org[nfs_mount:141] lsfs -c -v nfs +epprd_rg:cl_activate_nfs(0.124):/board;/board_org[nfs_mount:141] grep ^/board: +epprd_rg:cl_activate_nfs(0.125):/board;/board_org[nfs_mount:141] cut -d: -f7 +epprd_rg:cl_activate_nfs(0.129):/board;/board_org[nfs_mount:141] OPTIONS=bg,soft,intr,sec=sys,rw +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:142] echo bg,soft,intr,sec=sys,rw +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:142] sed s/+/:/g +epprd_rg:cl_activate_nfs(0.134):/board;/board_org[nfs_mount:142] OPTIONS=bg,soft,intr,sec=sys,rw +epprd_rg:cl_activate_nfs(0.134):/board;/board_org[nfs_mount:144] [[ -z bg,soft,intr,sec=sys,rw ]] +epprd_rg:cl_activate_nfs(0.135):/board;/board_org[nfs_mount:152] print bg,soft,intr,sec=sys,rw +epprd_rg:cl_activate_nfs(0.136):/board;/board_org[nfs_mount:152] grep -q intr +epprd_rg:cl_activate_nfs(0.139):/board;/board_org[nfs_mount:168] [[ -n '' ]] +epprd_rg:cl_activate_nfs(0.139):/board;/board_org[nfs_mount:175] [[ sequential == sequential ]] +epprd_rg:cl_activate_nfs(0.140):/board;/board_org[nfs_mount:177] print bg,soft,intr,sec=sys,rw +epprd_rg:cl_activate_nfs(0.141):/board;/board_org[nfs_mount:177] sed s/bg/fg/g +epprd_rg:cl_activate_nfs(0.144):/board;/board_org[nfs_mount:177] OPTIONS=fg,soft,intr,sec=sys,rw +epprd_rg:cl_activate_nfs(0.145):/board;/board_org[nfs_mount:178] let LIMIT+=4 +epprd_rg:cl_activate_nfs(0.145):/board;/board_org[nfs_mount:184] typeset RC +epprd_rg:cl_activate_nfs(0.145):/board;/board_org[nfs_mount:186] amlog_trace '' 'Activating NFS|/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:319] cltime +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:319] DATE=2023-01-28T18:07:06.738512 +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:320] echo '|2023-01-28T18:07:06.738512|INFO: Activating NFS|/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_nfs(0.174):/board;/board_org[nfs_mount:187] (( TRIES=0)) +epprd_rg:cl_activate_nfs(0.174):/board;/board_org[nfs_mount:187] (( TRIES' 'Activating NFS|/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:318] 1> /dev/null 2>& 1 
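Inside nfs_mount the trace above shows three preparation steps: the mount table is scanned to confirm /board is not already mounted, the stanza options for /board are read from lsfs, and, because the resource group's recovery method is sequential, bg is rewritten to fg and the retry budget is widened (let LIMIT+=4) so the foreground mount gets a bounded number of attempts. A condensed sketch of the option handling, assuming options live in field 7 of the colon-delimited 'lsfs -c' output as the trace indicates:

    #!/bin/ksh
    MountPoint=/board
    METHOD=sequential
    typeset -li LIMIT=1

    # Stanza options for this mount point, '+' separators normalized to ':'
    OPTIONS=$(lsfs -c -v nfs | grep "^${MountPoint}:" | cut -d: -f7)
    OPTIONS=$(echo $OPTIONS | sed s/+/:/g)

    if [[ $METHOD == sequential ]]
    then
        # Foreground mounts fail fast instead of retrying in the background,
        # so compensate with extra loop iterations.
        OPTIONS=$(echo $OPTIONS | sed s/bg/fg/g)
        let LIMIT+=4
    fi
    echo "mount -o $OPTIONS epprd:/board_org $MountPoint   # up to $LIMIT tries"

The second 'Activating NFS' availability-log stamp ten seconds later, followed by return 0 below, shows the mount completed within that budget.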
+epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:319] cltime +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:319] DATE=2023-01-28T18:07:16.793650 +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:320] echo '|2023-01-28T18:07:16.793650|INFO: Activating NFS|/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_nfs(10.229):/board;/board_org[nfs_mount:203] return 0 +epprd_rg:process_resources(24.198)[mount_nfs_filesystems:1540] RC=0 +epprd_rg:process_resources(24.198)[mount_nfs_filesystems:1541] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources(24.198)[mount_nfs_filesystems:1549] (( 0 != 0 )) +epprd_rg:process_resources(24.199)[mount_nfs_filesystems:1565] return 0 +epprd_rg:process_resources(24.199)[3324] true +epprd_rg:process_resources(24.199)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(24.199)[3328] set -a +epprd_rg:process_resources(24.199)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:07:16.807039 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(24.212)[3329] eval JOB_TYPE=NONE +epprd_rg:process_resources(24.212)[1] JOB_TYPE=NONE +epprd_rg:process_resources(24.212)[3330] RC=0 +epprd_rg:process_resources(24.212)[3331] set +a +epprd_rg:process_resources(24.212)[3333] (( 0 != 0 )) +epprd_rg:process_resources(24.212)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(24.212)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(24.212)[3343] export GROUPNAME +epprd_rg:process_resources(24.212)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(24.212)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(24.212)[3360] [[ NONE == RELEASE ]] +epprd_rg:process_resources(24.212)[3360] [[ NONE == ONLINE ]] +epprd_rg:process_resources(24.212)[3729] break +epprd_rg:process_resources(24.212)[3740] : If sddsrv was turned off above, turn it back on again +epprd_rg:process_resources(24.212)[3742] [[ FALSE == TRUE ]] +epprd_rg:process_resources(24.212)[3747] exit 0 :rg_move[247] : unsetting AM_SYNC_CALLED_BY from $'callers environment as\n: we dont' require it after this point in execution. :rg_move[250] unset AM_SYNC_CALLED_BY :rg_move[253] [[ -f /tmp/.NFSSTOPPED ]] :rg_move[274] [[ -f /tmp/.RPCLOCKDSTOPPED ]] :rg_move[276] rm -f /tmp/.RPCLOCKDSTOPPED :rg_move[277] ATTEMPT=0 :rg_move[277] typeset -li ATTEMPT :rg_move[278] (( ATTEMPT++ < 60 )) :rg_move[280] : rpc.lockd status check :rg_move[281] lssrc -s rpc.lockd :rg_move[281] LC_ALL=C :rg_move[281] grep stopping :rg_move[282] (( 1 == 0 )) :rg_move[282] break :rg_move[285] startsrc -s rpc.lockd 0513-059 The rpc.lockd Subsystem has been started. Subsystem PID is 23790040. 
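rg_move ends by bouncing rpc.lockd: having requested a stop earlier, it polls lssrc once a second (up to 60 tries) until the subsystem is no longer reported as stopping, then issues startsrc. rg_move_complete repeats the same wait-then-restart dance below. A minimal sketch of the pattern, using only the SRC commands visible in the trace:

    #!/bin/ksh
    # Wait for rpc.lockd to finish stopping, then start it again.
    typeset -li ATTEMPT=0
    while (( ATTEMPT++ < 60 ))
    do
        # grep succeeds only while lssrc still reports 'stopping'
        LC_ALL=C lssrc -s rpc.lockd | grep stopping > /dev/null || break
        sleep 1
    done
    startsrc -s rpc.lockd
    (( $? != 0 )) && echo 'WARNING: could not restart rpc.lockd' >&2

Cycling lockd (with statd handled separately by cl_update_statd below) makes NFS lock state get renegotiated against the node that now serves the exported filesystems.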
:rg_move[286] rcstartsrc=0 :rg_move[287] (( 0 != 0 )) :rg_move[293] exit 0 Jan 28 2023 18:07:16 EVENT COMPLETED: rg_move epprda 1 ACQUIRE 0 |2023-01-28T18:07:16|12511|EVENT COMPLETED: rg_move epprda 1 ACQUIRE 0| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T18:07:16.927608 :clevlog[amlog_trace:320] echo '|2023-01-28T18:07:16.927608|INFO: rg_move|epprd_rg|epprda|1|ACQUIRE|0' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :rg_move_acquire[+119] exit_status=0 :rg_move_acquire[+120] : exit status of clcallev rg_move epprda 1 ACQUIRE is: 0 :rg_move_acquire[+121] exit 0 Jan 28 2023 18:07:16 EVENT COMPLETED: rg_move_acquire epprda 1 0 |2023-01-28T18:07:17|12511|EVENT COMPLETED: rg_move_acquire epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:07:17.057201 + echo '|2023-01-28T18:07:17.057201|INFO: rg_move_acquire|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 18:07:17 EVENT START: rg_move_complete epprda 1 |2023-01-28T18:07:17|12511|EVENT START: rg_move_complete epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:07:17.253712 + echo '|2023-01-28T18:07:17.253712|INFO: rg_move_complete|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. :get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move_complete[91] version=%I% :rg_move_complete[97] STATUS=0 :rg_move_complete[97] typeset -li STATUS :rg_move_complete[99] [[ -z '' ]] :rg_move_complete[101] EMULATE=REAL :rg_move_complete[104] set -u :rg_move_complete[106] (( 2 < 2 || 2 > 3 )) :rg_move_complete[112] NODENAME=epprda :rg_move_complete[112] export NODENAME :rg_move_complete[113] RGID=1 :rg_move_complete[114] (( 2 == 3 )) :rg_move_complete[118] RGDESTINATION='' :rg_move_complete[122] : serial number for this event is 12511 :rg_move_complete[126] : Interpret resource group ID into a resource group name. :rg_move_complete[128] clodmget -qid=1 -f group -n HACMPgroup :rg_move_complete[128] eval RGNAME=epprd_rg :rg_move_complete[1] RGNAME=epprd_rg +epprd_rg:rg_move_complete[129] GROUPNAME=epprd_rg +epprd_rg:rg_move_complete[131] UPDATESTATD=0 +epprd_rg:rg_move_complete[131] typeset -li UPDATESTATD +epprd_rg:rg_move_complete[132] NFSSTOPPED=0 +epprd_rg:rg_move_complete[132] typeset -li NFSSTOPPED +epprd_rg:rg_move_complete[133] LIMIT=60 +epprd_rg:rg_move_complete[133] WAIT=1 +epprd_rg:rg_move_complete[133] TRY=0 +epprd_rg:rg_move_complete[133] typeset -li LIMIT WAIT TRY +epprd_rg:rg_move_complete[136] : If this is a two node cluster and exported filesystems exist, then +epprd_rg:rg_move_complete[137] : when the cluster topology is stable notify rpc.statd of the changes. 
+epprd_rg:rg_move_complete[139] clnodename +epprd_rg:rg_move_complete[139] wc -l +epprd_rg:rg_move_complete[139] (( 2 == 2 )) +epprd_rg:rg_move_complete[141] clodmget -f group -n HACMPgroup +epprd_rg:rg_move_complete[141] RESOURCE_GROUPS=epprd_rg +epprd_rg:rg_move_complete[144] clodmget -q group='epprd_rg AND name=EXPORT_FILESYSTEM' -f value -n HACMPresource +epprd_rg:rg_move_complete[144] EXPORTLIST=$'/board_org\n/sapmnt/EPP' +epprd_rg:rg_move_complete[146] [[ -n $'/board_org\n/sapmnt/EPP' ]] +epprd_rg:rg_move_complete[146] [[ epprd_rg == epprd_rg ]] +epprd_rg:rg_move_complete[148] UPDATESTATD=1 +epprd_rg:rg_move_complete[149] [[ REAL == EMUL ]] +epprd_rg:rg_move_complete[154] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+196] rpcinfo -p :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw status :cl_update_statd(0)[+207] : Get the current twin, if there is one :cl_update_statd(0)[+209] :cl_update_statd(0)[+209] nfso -H sm_gethost :cl_update_statd(0)[+209] 2>& 1 CURTWIN=epprds :cl_update_statd(0)[+210] RC=0 :cl_update_statd(0)[+212] [[ -z true ]] :cl_update_statd(0)[+212] [[ -z ]] :cl_update_statd(0)[+215] : Local node is no longer a cluster member, unregister its twin :cl_update_statd(0)[+215] [[ -n epprds ]] :cl_update_statd(0)[+219] nfso -H sm_unregister epprds :cl_update_statd(0)[+220] RC=0 :cl_update_statd(0)[+259] : RC is actually 0 :cl_update_statd(0)[+266] return 0 +epprd_rg:rg_move_complete[155] (( 0 != 0 )) +epprd_rg:rg_move_complete[160] break +epprd_rg:rg_move_complete[166] : Set the RESOURCE_GROUPS environment variable with the names +epprd_rg:rg_move_complete[167] : of all resource groups participating in this event, and export +epprd_rg:rg_move_complete[168] : them to all successive scripts. 
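cl_update_statd, traced above, keeps rpc.statd's 'twin' registration in step with cluster membership: it makes sure statd is running, reads the currently registered twin with nfso -H sm_gethost, and, since no partner node is active here, unregisters epprds. A sketch of that flow; sm_gethost and sm_unregister appear in the trace, while the sm_register call on the acquire path is an assumption:

    #!/bin/ksh
    TWIN_NAME=''      # desired twin; empty when no partner node is up

    # Ensure statd itself is alive before touching its registration
    if LC_ALL=C lssrc -s statd | grep -qw inoperative
    then
        startsrc -s statd
    fi

    CURTWIN=$(nfso -H sm_gethost 2>&1)       # currently registered twin
    if [[ -z $TWIN_NAME && -n $CURTWIN ]]
    then
        nfso -H sm_unregister $CURTWIN       # partner gone: drop it
    elif [[ -n $TWIN_NAME && $CURTWIN != $TWIN_NAME ]]
    then
        nfso -H sm_register $TWIN_NAME       # assumed acquire-path call
    fi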
+epprd_rg:rg_move_complete[170] set -a +epprd_rg:rg_move_complete[171] clsetenvgrp epprda rg_move_complete epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move_complete epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_complete[171] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_complete[172] RC=0 +epprd_rg:rg_move_complete[173] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_complete[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_complete[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_complete[3] HOMELESS_GROUPS='' +epprd_rg:rg_move_complete[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_complete[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_complete[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_complete[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_complete[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_complete[8] SIBLING_GROUPS='' +epprd_rg:rg_move_complete[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_complete[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_complete[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[174] set +a +epprd_rg:rg_move_complete[175] (( 0 != 0 )) +epprd_rg:rg_move_complete[182] : For each participating resource group, serially process the resources. +epprd_rg:rg_move_complete[251] (( 1 == 1 )) +epprd_rg:rg_move_complete[253] [[ REAL == EMUL ]] +epprd_rg:rg_move_complete[259] stopsrc -s rpc.lockd 0513-044 The rpc.lockd Subsystem was requested to stop. +epprd_rg:rg_move_complete[260] rcstopsrc=0 +epprd_rg:rg_move_complete[261] (( 0 != 0 )) +epprd_rg:rg_move_complete[266] (( TRY=0)) +epprd_rg:rg_move_complete[266] (( 0<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 1<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! 
-z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 2<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 3<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 4<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 5<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 6<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 7<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 8<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 9<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 10<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! 
-z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 11<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 12<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 13<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 14<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 15<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 16<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 17<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 18<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 19<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! 
-z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 20<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z '' ]] +epprd_rg:rg_move_complete[273] break +epprd_rg:rg_move_complete[277] [[ ! -z '' ]] +epprd_rg:rg_move_complete[300] : Sure that rpc.lockd stopped. Restart it. +epprd_rg:rg_move_complete[302] startsrc -s rpc.lockd 0513-059 The rpc.lockd Subsystem has been started. Subsystem PID is 20185458. +epprd_rg:rg_move_complete[303] rcstartsrc=0 +epprd_rg:rg_move_complete[304] (( 0 != 0 )) +epprd_rg:rg_move_complete[365] : If the resource group in this rg_move is now homeless, +epprd_rg:rg_move_complete[366] : then we need to put it into an error state. +epprd_rg:rg_move_complete[368] active_node=0 +epprd_rg:rg_move_complete[428] : If the resource group in this rg_move is now homeless_secondary, +epprd_rg:rg_move_complete[429] : then we need to put it into an errorsecondary state. +epprd_rg:rg_move_complete[437] : Set an error state for concurrent groups that have +epprd_rg:rg_move_complete[438] : been brought offline on this node by rg_move. +epprd_rg:rg_move_complete[453] AM_SYNC_CALLED_BY=RG_MOVE_COMPLETE +epprd_rg:rg_move_complete[453] export AM_SYNC_CALLED_BY +epprd_rg:rg_move_complete[454] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-01-28T18:07:37.557688 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=SYNC_VGS ACTION=ACQUIRE VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' :process_resources[1] JOB_TYPE=SYNC_VGS :process_resources[1] ACTION=ACQUIRE :process_resources[1] VOLUME_GROUPS=datavg :process_resources[1] RESOURCE_GROUPS='epprd_rg ' :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 )) :process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ SYNC_VGS == RELEASE ]] +epprd_rg:process_resources[3360] [[ SYNC_VGS == ONLINE ]] +epprd_rg:process_resources[3474] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[3476] sync_volume_groups +epprd_rg:process_resources[sync_volume_groups:2699] PS4_FUNC=sync_volume_groups +epprd_rg:process_resources[sync_volume_groups:2699] typeset PS4_FUNC +epprd_rg:process_resources[sync_volume_groups:2700] [[ high == high ]] +epprd_rg:process_resources[sync_volume_groups:2700] set -x +epprd_rg:process_resources[sync_volume_groups:2701] STAT=0 +epprd_rg:process_resources[sync_volume_groups:2704] export GROUPNAME +epprd_rg:process_resources[sync_volume_groups:2706] get_list_head datavg +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC 
+epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo datavg +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo datavg +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[sync_volume_groups:2706] read LIST_OF_VOLUME_GROUPS_FOR_RG +epprd_rg:process_resources[sync_volume_groups:2707] get_list_tail datavg +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo datavg +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[sync_volume_groups:2707] read VOLUME_GROUPS +epprd_rg:process_resources[sync_volume_groups:2710] : Sync the active volume groups +epprd_rg:process_resources[sync_volume_groups:2712] lsvg -L -o +epprd_rg:process_resources[sync_volume_groups:2712] 2> /tmp/lsvg.err +epprd_rg:process_resources[sync_volume_groups:2712] sort +epprd_rg:process_resources[sync_volume_groups:2712] 1> /tmp/lsvg.out.23790048 +epprd_rg:process_resources[sync_volume_groups:2713] echo datavg +epprd_rg:process_resources[sync_volume_groups:2713] tr ' ' '\n' +epprd_rg:process_resources[sync_volume_groups:2714] sort +epprd_rg:process_resources[sync_volume_groups:2714] comm -12 /tmp/lsvg.out.23790048 - +epprd_rg:process_resources[sync_volume_groups:2716] cl_sync_vgs datavg +epprd_rg:process_resources[sync_volume_groups:2718] [[ -s /tmp/lsvg.err ]] +epprd_rg:process_resources[sync_volume_groups:2723] rm -f /tmp/lsvg.out.23790048 /tmp/lsvg.err +epprd_rg:cl_sync_vgs[303] version=1.24.1.4 +epprd_rg:cl_sync_vgs[306] (( 1 == 0 )) +epprd_rg:cl_sync_vgs[312] : syncing 4 stale PPs at a time seems to be a win most of the time, but +epprd_rg:cl_sync_vgs[313] : we honor the NUM_PARALLEL_LPS value from /etc/environment, as does +epprd_rg:cl_sync_vgs[314] : syncvg. +epprd_rg:cl_sync_vgs[316] syncflag='' +epprd_rg:cl_sync_vgs[316] export syncflag +epprd_rg:cl_sync_vgs[317] PS4_LOOP='' +epprd_rg:cl_sync_vgs[317] export PS4_LOOP +epprd_rg:cl_sync_vgs[318] typeset -i npl +epprd_rg:cl_sync_vgs[319] grep -q ^NUM_PARALLEL_LPS= /etc/environment +epprd_rg:cl_sync_vgs[321] syncflag=-P4 +epprd_rg:cl_sync_vgs[328] echo 'NOTE: While the sync is going on, volume group can be used' NOTE: While the sync is going on, volume group can be used +epprd_rg:cl_sync_vgs[331] : For GLVM volume groups, read PARALLEL LPS count from HACMPresource if it is set from GUI, +epprd_rg:cl_sync_vgs[332] : else, read from environment variables, if it is not set use 32 as default value. 
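cl_sync_vgs throttles the sync: it honors NUM_PARALLEL_LPS from /etc/environment when the variable is set and otherwise falls back to syncing 4 stale physical partitions at a time, passing the result to syncvg as -P (the trace lands on syncflag=-P4). A sketch of that flag construction under those assumptions:

    #!/bin/ksh
    typeset -i npl=4          # default parallel stale-LP count
    if grep -q '^NUM_PARALLEL_LPS=' /etc/environment
    then
        npl=$(grep '^NUM_PARALLEL_LPS=' /etc/environment | cut -d= -f2)
    fi
    syncflag="-P$npl"

    # The VG stays usable while stale copies are refreshed in-line.
    echo "syncvg $syncflag -v datavg"

The GLVM branch that follows (the GMVG_REP_RESOURCE lookup) would instead take a PARALLEL LPS count from HACMPresource, but datavg is not a GLVM group, so that lookup comes back empty below.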
+epprd_rg:cl_sync_vgs[334] clodmget -q name='GMVG_REP_RESOURCE and value=datavg' -f group HACMPresource +epprd_rg:cl_sync_vgs[334] 2> /dev/null +epprd_rg:cl_sync_vgs[334] glvm_rg='' +epprd_rg:cl_sync_vgs[335] [[ -n '' ]] +epprd_rg:cl_sync_vgs[353] check_sync datavg +epprd_rg:cl_sync_vgs(0.019):datavg[check_sync:76] typeset vg_name +epprd_rg:cl_sync_vgs(0.019):datavg[check_sync:77] typeset vgid +epprd_rg:cl_sync_vgs(0.019):datavg[check_sync:78] typeset disklist +epprd_rg:cl_sync_vgs(0.019):datavg[check_sync:79] typeset lv_name +epprd_rg:cl_sync_vgs(0.019):datavg[check_sync:80] typeset -li stale_count +epprd_rg:cl_sync_vgs(0.019):datavg[check_sync:81] typeset -li mode +epprd_rg:cl_sync_vgs(0.019):datavg[check_sync:82] RC=0 +epprd_rg:cl_sync_vgs(0.019):datavg[check_sync:82] typeset -li RC +epprd_rg:cl_sync_vgs(0.019):datavg[check_sync:83] typeset site_node_list +epprd_rg:cl_sync_vgs(0.019):datavg[check_sync:84] typeset site_choice +epprd_rg:cl_sync_vgs(0.019):datavg[check_sync:86] vg_name=datavg +epprd_rg:cl_sync_vgs(0.020):datavg[check_sync:87] disklist='' +epprd_rg:cl_sync_vgs(0.020):datavg[check_sync:89] getlvodm -v datavg +epprd_rg:process_resources[sync_volume_groups:2732] unset AM_SYNC_CALLED_BY +epprd_rg:process_resources[sync_volume_groups:2734] return 0 +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:cl_sync_vgs(0.026):datavg[check_sync:89] vgid=00c44af100004b00000001851e9dc053 +epprd_rg:cl_sync_vgs(0.026):datavg[check_sync:92] : find disks in the VG that LVM thinks are inaccessable +epprd_rg:cl_sync_vgs(0.026):datavg[check_sync:94] lsvg -L -p datavg +epprd_rg:cl_sync_vgs(0.026):datavg[check_sync:94] LC_ALL=C +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:07:37.644904 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=APPLICATIONS ACTION=ACQUIRE ALL_APPLICATIONS='"epprd_app"' RESOURCE_GROUPS='"epprd_rg' '"' MISCDATA='""' +epprd_rg:process_resources[1] JOB_TYPE=APPLICATIONS +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] ALL_APPLICATIONS=epprd_app +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] MISCDATA='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ APPLICATIONS == RELEASE ]] +epprd_rg:process_resources[3360] [[ APPLICATIONS == ONLINE ]] +epprd_rg:process_resources[3549] process_applications ACQUIRE +epprd_rg:process_resources[process_applications:312] PS4_FUNC=process_applications +epprd_rg:process_resources[process_applications:312] typeset PS4_FUNC +epprd_rg:process_resources[process_applications:313] [[ high == high ]] +epprd_rg:process_resources[process_applications:313] set -x +epprd_rg:process_resources[process_applications:316] : Each subprocess will log to a file with this name and PID +epprd_rg:process_resources[process_applications:318] 
TMP_FILE=/var/hacmp/log/.process_resources_applications.23790048 +epprd_rg:process_resources[process_applications:318] export TMP_FILE +epprd_rg:process_resources[process_applications:320] rm -f '/var/hacmp/log/.process_resources_applications*' +epprd_rg:process_resources[process_applications:322] WAITPIDS='' +epprd_rg:process_resources[process_applications:323] LPAR_ACQUIRE_FAILED=0 +epprd_rg:process_resources[process_applications:324] LPAR_RELEASE_FAILED=0 +epprd_rg:process_resources[process_applications:325] START_STOP_FAILED=0 +epprd_rg:process_resources[process_applications:326] LIST_OF_APPS=epprd_app +epprd_rg:process_resources[process_applications:329] : Acquire lpar resources in one-shot before starting applications +epprd_rg:process_resources[process_applications:331] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[process_applications:333] GROUPNAME=epprd_rg +epprd_rg:process_resources[process_applications:333] export GROUPNAME +epprd_rg:process_resources[process_applications:334] clmanageroha -o acquire -s -l epprd_app +epprd_rg:process_resources[process_applications:334] 3>& 2 +epprd_rg:clmanageroha[318] : version='@(#)' 5881272 43haes/usr/sbin/cluster/events/clmanageroha.sh, 61aha_r726, 2205A_aha726, May 16 2022 12:15 PM +epprd_rg:clmanageroha[321] clodmget -n -f connection_type HACMPhmcparam +epprd_rg:clmanageroha[321] CONN_TYPE=0 +epprd_rg:clmanageroha[321] typeset -i CONN_TYPE +epprd_rg:clmanageroha[323] clodmget -q name='epprda and object like POWERVS_*' -nf name HACMPnode +epprd_rg:clmanageroha[323] 2> /dev/null +epprd_rg:clmanageroha[323] [[ -n '' ]] +epprd_rg:clmanageroha[326] export CONN_TYPE +epprd_rg:clmanageroha[331] roha_session_open -o acquire -s -l epprd_app +epprd_rg:clmanageroha[roha_session_open:131] roha_session.id=26476880 +epprd_rg:clmanageroha[roha_session_open:132] date +epprd_rg:clmanageroha[roha_session_open:132] LC_ALL=C +epprd_rg:clmanageroha[roha_session_open:132] roha_session_log 'Open session 26476880 at Sat Jan 28 18:07:37 KORST 2023' [ROHALOG:26476880:(0.070)] Open session 26476880 at Sat Jan 28 18:07:37 KORST 2023 +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:146] roha_session.operation=acquire +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:143] roha_session.systemmirror_mode=1 +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:149] roha_session.optimal_apps=epprd_app +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:163] [[ acquire != @(acquire|release|adjust) ]] +epprd_rg:clmanageroha[roha_session_open:168] no_roha_apps=0 +epprd_rg:clmanageroha[roha_session_open:168] typeset -i no_roha_apps +epprd_rg:clmanageroha[roha_session_open:169] need_explicit_res_rel=0 +epprd_rg:clmanageroha[roha_session_open:169] typeset -i need_explicit_res_rel +epprd_rg:clmanageroha[roha_session_open:187] [[ -n epprd_app ]] +epprd_rg:clmanageroha[roha_session_open:187] clmgr q roha +epprd_rg:clmanageroha[roha_session_open:187] sort +epprd_rg:clmanageroha[roha_session_open:187] uniq -d +epprd_rg:cl_sync_vgs(0.172):datavg[check_sync:94] disklist=$'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 
89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.174):datavg[check_sync:95] print -- $'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.176):datavg[check_sync:95] grep -w missing +epprd_rg:cl_sync_vgs(0.183):datavg[check_sync:95] cut -f1 '-d ' +epprd_rg:cl_sync_vgs(0.190):datavg[check_sync:95] missing_disklist='' +epprd_rg:cl_sync_vgs(0.191):datavg[check_sync:96] print -- $'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.196):datavg[check_sync:96] grep -w removed +epprd_rg:cl_sync_vgs(0.201):datavg[check_sync:96] cut -f1 '-d ' +epprd_rg:cl_sync_vgs(0.205):datavg[check_sync:96] removed_disklist='' +epprd_rg:cl_sync_vgs(0.205):datavg[check_sync:100] : Proceeed if there are some disks that LVM thinks are inaccessable +epprd_rg:cl_sync_vgs(0.205):datavg[check_sync:102] [[ -n '' ]] +epprd_rg:cl_sync_vgs(0.206):datavg[check_sync:102] [[ -n '' ]] +epprd_rg:cl_sync_vgs(0.206):datavg[check_sync:196] : sync if any LVs in the VG that have stale partitions +epprd_rg:cl_sync_vgs(0.206):datavg[check_sync:198] (( 0 == 0 )) +epprd_rg:cl_sync_vgs(0.206):datavg[check_sync:201] : A status of 2,3,5 or 7 indicates the presence of dirty and/or stale partitions +epprd_rg:cl_sync_vgs(0.206):datavg[check_sync:213] is_start_logged=0 +epprd_rg:cl_sync_vgs(0.206):datavg[check_sync:218] at_least_one_sync_success=0 +epprd_rg:cl_sync_vgs(0.206):datavg[check_sync:219] lqueryvg -g 00c44af100004b00000001851e9dc053 -L +epprd_rg:cl_sync_vgs(0.209):datavg[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.311):datavg[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.311):datavg.epprdaloglv[check_sync:221] PS4_LOOP=datavg.epprdaloglv +epprd_rg:cl_sync_vgs(0.311):datavg.epprdaloglv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.311):datavg.epprdaloglv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.311):datavg.epprdaloglv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.311):datavg.epprdaloglv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.311):datavg.epprdaloglv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.311):datavg.saplv[check_sync:221] PS4_LOOP=datavg.saplv +epprd_rg:cl_sync_vgs(0.311):datavg.saplv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.311):datavg.saplv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.311):datavg.saplv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.311):datavg.saplv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.311):datavg.saplv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.311):datavg.sapmntlv[check_sync:221] PS4_LOOP=datavg.sapmntlv +epprd_rg:cl_sync_vgs(0.311):datavg.sapmntlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) 
+epprd_rg:cl_sync_vgs(0.311):datavg.sapmntlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.311):datavg.sapmntlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.311):datavg.sapmntlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.311):datavg.sapmntlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.311):datavg.oraclelv[check_sync:221] PS4_LOOP=datavg.oraclelv +epprd_rg:cl_sync_vgs(0.311):datavg.oraclelv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.311):datavg.oraclelv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.311):datavg.oraclelv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.311):datavg.oraclelv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.311):datavg.oraclelv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.311):datavg.epplv[check_sync:221] PS4_LOOP=datavg.epplv +epprd_rg:cl_sync_vgs(0.311):datavg.epplv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.311):datavg.epplv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.311):datavg.epplv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.311):datavg.epplv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.311):datavg.epplv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.311):datavg.oraarchlv[check_sync:221] PS4_LOOP=datavg.oraarchlv +epprd_rg:cl_sync_vgs(0.311):datavg.oraarchlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.311):datavg.oraarchlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.311):datavg.oraarchlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.311):datavg.oraarchlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.311):datavg.oraarchlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata1lv[check_sync:221] PS4_LOOP=datavg.sapdata1lv +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata1lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata1lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata1lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata1lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata1lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata2lv[check_sync:221] PS4_LOOP=datavg.sapdata2lv +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata2lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata2lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata2lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata2lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata2lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata3lv[check_sync:221] PS4_LOOP=datavg.sapdata3lv +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata3lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata3lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata3lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata3lv[check_sync:219] read lv_id lv_name lv_status 
+epprd_rg:cl_sync_vgs(0.311):datavg.sapdata3lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata4lv[check_sync:221] PS4_LOOP=datavg.sapdata4lv +epprd_rg:cl_sync_vgs(0.311):datavg.sapdata4lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata4lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata4lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata4lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.312):datavg.sapdata4lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.312):datavg.boardlv[check_sync:221] PS4_LOOP=datavg.boardlv +epprd_rg:cl_sync_vgs(0.312):datavg.boardlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.312):datavg.boardlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.312):datavg.boardlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.312):datavg.boardlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.312):datavg.boardlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.312):datavg.origlogAlv[check_sync:221] PS4_LOOP=datavg.origlogAlv +epprd_rg:cl_sync_vgs(0.312):datavg.origlogAlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.312):datavg.origlogAlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.312):datavg.origlogAlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.312):datavg.origlogAlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.312):datavg.origlogAlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.312):datavg.origlogBlv[check_sync:221] PS4_LOOP=datavg.origlogBlv +epprd_rg:cl_sync_vgs(0.312):datavg.origlogBlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.312):datavg.origlogBlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.312):datavg.origlogBlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.312):datavg.origlogBlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.312):datavg.origlogBlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogAlv[check_sync:221] PS4_LOOP=datavg.mirrlogAlv +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogAlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogAlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogAlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogAlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogAlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogBlv[check_sync:221] PS4_LOOP=datavg.mirrlogBlv +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogBlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogBlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogBlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogBlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogBlv[check_sync:268] [[ -n RG_MOVE_COMPLETE ]] +epprd_rg:cl_sync_vgs(0.312):datavg.mirrlogBlv[check_sync:268] (( 0 == 1 )) +epprd_rg:cl_sync_vgs[355] exit 0 +epprd_rg:clmanageroha[roha_session_open:187] echo epprd_app 
+epprd_rg:clmanageroha[roha_session_open:187] sort -u +epprd_rg:clmanageroha[roha_session_open:187] echo '\nepprd_app' +epprd_rg:clmanageroha[roha_session_open:187] [[ -z '' ]] +epprd_rg:clmanageroha[roha_session_open:189] roha_session_log 'INFO: No ROHA configured on applications.\n' [ROHALOG:26476880:(0.555)] INFO: No ROHA configured on applications. [ROHALOG:26476880:(0.555)] +epprd_rg:clmanageroha[roha_session_open:190] no_roha_apps=1 +epprd_rg:clmanageroha[roha_session_open:195] read_tunables +epprd_rg:clmanageroha[roha_session_open:196] echo '' +epprd_rg:clmanageroha[roha_session_open:196] grep -q epprda +epprd_rg:clmanageroha[roha_session_open:197] (( 1 == 0 )) +epprd_rg:clmanageroha[roha_session_open:202] (( 1 == 1 )) +epprd_rg:clmanageroha[roha_session_open:203] roha_session_read_odm_dynresop DLPAR_MEM +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_MEM -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:203] (( 0 == 0.00 )) +epprd_rg:clmanageroha[roha_session_open:204] roha_session_read_odm_dynresop DLPAR_PROCS +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROCS -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:204] (( 0 == 0 )) +epprd_rg:clmanageroha[roha_session_open:205] roha_session_read_odm_dynresop DLPAR_PROC_UNITS +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROC_UNITS -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:205] (( 0 == 0.00 )) +epprd_rg:clmanageroha[roha_session_open:206] roha_session_log 'INFO: Nothing to be done.\n' [ROHALOG:26476880:(0.611)] INFO: Nothing to be done. 
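
The cl_sync_vgs pass that opens this sequence reduces to two probes: does LVM report any PV in the volume group as missing or removed, and does any LV report a status of 2, 3, 5 or 7 (dirty and/or stale partitions, per the script's own comments)? A minimal ksh sketch of that check, assuming the standard AIX LVM commands lsvg, getlvodm and lqueryvg behave as the trace shows; vg_needs_sync is a hypothetical helper name, not the shipped code:

    # Return 0 (true) if the VG has inaccessible PVs or stale/dirty LV partitions.
    vg_needs_sync() {
        typeset vg=$1 vgid
        # Any PV reported 'missing' or 'removed' means LVM has lost access to a disk
        lsvg -p "$vg" 2>/dev/null | grep -qw -e missing -e removed && return 0
        # Map the VG name to the VGID that lqueryvg expects (assumed mapping)
        vgid=$(getlvodm -v "$vg")
        # lqueryvg -g <vgid> -L prints: lv_id lv_name lv_status;
        # 2, 3, 5 or 7 flags dirty and/or stale partitions, anything else does not
        lqueryvg -g "$vgid" -L |
            awk '$3 == 2 || $3 == 3 || $3 == 5 || $3 == 7 { stale = 1 } END { exit !stale }'
    }

    vg_needs_sync datavg && echo "datavg has stale partitions; a syncvg is warranted"
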
[ROHALOG:26476880:(0.611)] +epprd_rg:clmanageroha[roha_session_open:207] exit 0 +epprd_rg:process_resources[process_applications:335] RC=0 +epprd_rg:process_resources[process_applications:336] (( 0 != 0 )) +epprd_rg:process_resources[process_applications:343] (( LPAR_ACQUIRE_FAILED == 0 )) +epprd_rg:process_resources[process_applications:345] : Loop through all groups to start or stop applications +epprd_rg:process_resources[process_applications:348] export GROUPNAME +epprd_rg:process_resources[process_applications:351] : Break out application data +epprd_rg:process_resources[process_applications:353] get_list_head epprd_app +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo epprd_app +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo epprd_app +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[process_applications:353] read LIST_OF_APPLICATIONS_FOR_RG +epprd_rg:process_resources[process_applications:354] get_list_tail epprd_app +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo epprd_app +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[process_applications:354] read ALL_APPLICATIONS +epprd_rg:process_resources[process_applications:356] get_list_head +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[process_applications:356] read MISCDATA_FOR_RG +epprd_rg:process_resources[process_applications:357] get_list_tail +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[process_applications:357] read MISCDATA +epprd_rg:process_resources[process_applications:359] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[process_applications:374] APPLICATIONS=epprd_app +epprd_rg:process_resources[process_applications:374] export APPLICATIONS +epprd_rg:process_resources[process_applications:375] MISC_DATA='' +epprd_rg:process_resources[process_applications:375] export MISC_DATA 
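
get_list_head and get_list_tail, traced above, are the whole list-walking mechanism in process_resources: each exported work list is colon-separated per resource group, with commas separating items within a group. The same idiom as a self-contained ksh sketch (it relies on ksh running the last element of a pipeline in the current shell, so read can set the caller's variables; the items after epprd_app are invented for illustration):

    get_list_head() {                  # items for the first group; commas become spaces
        typeset listhead listtail
        echo "$*" | IFS=: read listhead listtail
        echo "$listhead" | tr ',' ' '
    }

    get_list_tail() {                  # everything after the first group
        typeset listhead listtail
        echo "$*" | IFS=: read listhead listtail
        echo "$listtail"
    }

    get_list_head 'epprd_app:app2,app3'    # prints: epprd_app
    get_list_tail 'epprd_app:app2,app3'    # prints: app2,app3
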
+epprd_rg:process_resources[process_applications:378] : Now call start_or_stop_applications_for_rg to do the app start/stop.
+epprd_rg:process_resources[process_applications:381] start_or_stop_applications_for_rg ACQUIRE /var/hacmp/log/.process_resources_applications.23790048.epprd_rg
+epprd_rg:process_resources[start_or_stop_applications_for_rg:248] PS4_FUNC=start_or_stop_applications_for_rg
+epprd_rg:process_resources[start_or_stop_applications_for_rg:248] typeset PS4_FUNC
+epprd_rg:process_resources[start_or_stop_applications_for_rg:249] [[ high == high ]]
+epprd_rg:process_resources[start_or_stop_applications_for_rg:249] set -x
+epprd_rg:process_resources[start_or_stop_applications_for_rg:251] [[ ACQUIRE == ACQUIRE ]]
+epprd_rg:process_resources[start_or_stop_applications_for_rg:253] cmd_to_execute=start_server
+epprd_rg:process_resources[start_or_stop_applications_for_rg:259] : File name to store our exit status
+epprd_rg:process_resources[start_or_stop_applications_for_rg:261] STATUS_FILE=/var/hacmp/log/.process_resources_applications.23790048.epprd_rg
+epprd_rg:process_resources[start_or_stop_applications_for_rg:264] : Use clcallev to run the event
+epprd_rg:process_resources[start_or_stop_applications_for_rg:266] clcallev start_server epprd_app
+epprd_rg:process_resources[process_applications:384] : Add PID of the last bg start_or_stop_applications_for_rg process to WAITPIDS.
+epprd_rg:process_resources[process_applications:386] WAITPIDS=' 25559468'
+epprd_rg:process_resources[process_applications:390] : Wait for the start_or_stop_applications_for_rg PIDs to finish.
+epprd_rg:process_resources[process_applications:393] wait 25559468

Jan 28 2023 18:07:38 EVENT START: start_server epprd_app
|2023-01-28T18:07:38|12511|EVENT START: start_server epprd_app|
+epprd_rg:start_server[+206] version=%I%
+epprd_rg:start_server[+210] export TMP_FILE=/var/hacmp/log/.start_server.26476888
+epprd_rg:start_server[+211] export DCD=/etc/es/objrepos
+epprd_rg:start_server[+212] export ACD=/usr/es/sbin/cluster/etc/objrepos/active
+epprd_rg:start_server[+214] rm -f /var/hacmp/log/.start_server.26476888
+epprd_rg:start_server[+216] STATUS=0
+epprd_rg:start_server[+220] PROC_RES=false
+epprd_rg:start_server[+224] [[ APPLICATIONS != 0 ]]
+epprd_rg:start_server[+224] [[ APPLICATIONS != GROUP ]]
+epprd_rg:start_server[+225] PROC_RES=true
+epprd_rg:start_server[+228] set -u
+epprd_rg:start_server[+229] typeset WPARNAME EXEC WPARDIR
+epprd_rg:start_server[+230] export WPARNAME EXEC WPARDIR
+epprd_rg:start_server[+232] EXEC=
+epprd_rg:start_server[+233] WPARNAME=
+epprd_rg:start_server[+234] WPARDIR=
+epprd_rg:start_server[+237] ALLSERVERS=All_servers
+epprd_rg:start_server[+238] ALLNOERRSERV=All_nonerror_servers
+epprd_rg:start_server[+239] cl_RMupdate resource_acquiring All_servers start_server
2023-01-28T18:07:38.402774
2023-01-28T18:07:38.407437
+epprd_rg:start_server[+241]
+epprd_rg:start_server[+241] clwparname epprd_rg
+epprd_rg:clwparname[38] version=1.3.1.1
+epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource
+epprd_rg:clwparname[44] [[ -z '' ]]
+epprd_rg:clwparname[44] exit 0
WPARNAME=
+epprd_rg:start_server[+243] (( 0 == 0 ))
+epprd_rg:start_server[+243] [[ -n ]]
+epprd_rg:start_server[+261] wait
+epprd_rg:start_server[+258] start_and_monitor_server epprd_app
+epprd_rg:start_server[start_and_monitor_server+5] RETURN_STATUS=0
+epprd_rg:start_server[start_and_monitor_server+7] server=epprd_app
+epprd_rg:start_server[start_and_monitor_server+12] echo Checking whether epprd_app is already running...\n
Checking whether epprd_app is already running...
+epprd_rg:start_server[start_and_monitor_server+12] [[ -n ]]
+epprd_rg:start_server[start_and_monitor_server+18] cl_app_startup_monitor -s epprd_app -a
+epprd_rg:start_server[start_and_monitor_server+21] RETURN_STATUS=1
+epprd_rg:start_server[start_and_monitor_server+22] : exit status of cl_app_startup_monitor is: 1
+epprd_rg:start_server[start_and_monitor_server+22] [[ 1 == 0 ]]
+epprd_rg:start_server[start_and_monitor_server+33] echo Application monitor(s) indicate that epprd_app is not active. Continuing with application startup.\n
Application monitor(s) indicate that epprd_app is not active. Continuing with application startup.
+epprd_rg:start_server[start_and_monitor_server+42]
+epprd_rg:start_server[start_and_monitor_server+42] cut -d: -f2
+epprd_rg:start_server[start_and_monitor_server+42] cllsserv -cn epprd_app
START=/etc/hacmp/epprd_start.sh
+epprd_rg:start_server[start_and_monitor_server+43]
+epprd_rg:start_server[start_and_monitor_server+43] cut -d -f1
+epprd_rg:start_server[start_and_monitor_server+43] echo /etc/hacmp/epprd_start.sh
START_SCRIPT=/etc/hacmp/epprd_start.sh
+epprd_rg:start_server[start_and_monitor_server+44]
+epprd_rg:start_server[start_and_monitor_server+44] cut -d: -f4
+epprd_rg:start_server[start_and_monitor_server+44] cllsserv -cn epprd_app
START_MODE=background
+epprd_rg:start_server[start_and_monitor_server+44] [[ -z background ]]
+epprd_rg:start_server[start_and_monitor_server+47] PATTERN=epprda epprd_app
+epprd_rg:start_server[start_and_monitor_server+48] RETURN_STATUS=0
+epprd_rg:start_server[start_and_monitor_server+51] amlog_trace Starting application controller in background|epprd_app
+epprd_rg:start_server[start_and_monitor_server+200] clcycle clavailability.log
+epprd_rg:start_server[start_and_monitor_server+200] 1> /dev/null 2>& 1
+epprd_rg:start_server[start_and_monitor_server+200]
+epprd_rg:start_server[start_and_monitor_server+200] cltime
DATE=2023-01-28T18:07:38.455508
+epprd_rg:start_server[start_and_monitor_server+200] echo |2023-01-28T18:07:38.455508|INFO: Starting application controller in background|epprd_app
+epprd_rg:start_server[start_and_monitor_server+200] 1>> /var/hacmp/availability/clavailability.log
+epprd_rg:start_server[start_and_monitor_server+51] [[ -n ]]
+epprd_rg:start_server[start_and_monitor_server+51] [[ -z ]]
+epprd_rg:start_server[start_and_monitor_server+51] [[ -x /etc/hacmp/epprd_start.sh ]]
+epprd_rg:start_server[start_and_monitor_server+60] [ background == background ]
+epprd_rg:start_server[start_and_monitor_server+62] date
+epprd_rg:start_server[start_and_monitor_server+62] LC_ALL=C
+epprd_rg:start_server[start_and_monitor_server+62] echo Running application controller start script for epprd_app in the background at Sat Jan 28 18:07:38 KORST 2023.\n
Running application controller start script for epprd_app in the background at Sat Jan 28 18:07:38 KORST 2023.
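
The amlog_trace block traced above (and again below, once the start script is dispatched) is PowerHA's availability-audit idiom: age the log with clcycle if needed, stamp the record with cltime, and append a single pipe-delimited line. Reduced to a sketch built from the same utilities the trace shows (clcycle and cltime ship with PowerHA):

    amlog_trace() {
        typeset msg=$1 DATE
        clcycle clavailability.log > /dev/null 2>&1    # rotate aged log copies, ignore errors
        DATE=$(cltime)                                 # e.g. 2023-01-28T18:07:38.455508
        echo "|${DATE}|INFO: ${msg}" >> /var/hacmp/availability/clavailability.log
    }

    amlog_trace 'Starting application controller in background|epprd_app'
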
+epprd_rg:start_server[start_and_monitor_server+62] [[ 0 != 0 ]]
+epprd_rg:start_server[start_and_monitor_server+62] [[ -n ]]
+epprd_rg:start_server[start_and_monitor_server+94] cl_app_startup_monitor -s epprd_app
+epprd_rg:start_server[start_and_monitor_server+63] /etc/hacmp/epprd_start.sh
+epprd_rg:start_server[start_and_monitor_server+63] ODMDIR=/etc/es/objrepos
+epprd_rg:start_server[start_and_monitor_server+97] RETURN_STATUS=0
+epprd_rg:start_server[start_and_monitor_server+98] : exit status of cl_app_startup_monitor is: 0
+epprd_rg:start_server[start_and_monitor_server+98] [[ 0 != 0 ]]
+epprd_rg:start_server[start_and_monitor_server+109] echo epprd_app 0
+epprd_rg:start_server[start_and_monitor_server+109] 1> /var/hacmp/log/.start_server.26476888.epprd_app
+epprd_rg:start_server[start_and_monitor_server+112]
+epprd_rg:start_server[start_and_monitor_server+112] cut -d: -f4
+epprd_rg:start_server[start_and_monitor_server+112] cllsserv -cn epprd_app
START_MODE=background
+epprd_rg:start_server[start_and_monitor_server+112] [[ background == foreground ]]
+epprd_rg:start_server[start_and_monitor_server+132] return 0
+epprd_rg:start_server[+266]
+epprd_rg:start_server[+266] cut -d: -f4
+epprd_rg:start_server[+266] cllsserv -cn epprd_app
START_MODE=background
+epprd_rg:start_server[+267] [ background == background ]
+epprd_rg:start_server[+269]
+epprd_rg:start_server[+269] cut -f2 -d
+epprd_rg:start_server[+269] cat /var/hacmp/log/.start_server.26476888.epprd_app
SUCCESS=0
+epprd_rg:start_server[+269] [[ 0 != 0 ]]
+epprd_rg:start_server[+274] amlog_trace Starting application controller in background|epprd_app
+epprd_rg:start_server[+200] clcycle clavailability.log
+epprd_rg:start_server[+200] 1> /dev/null 2>& 1
+epprd_rg:start_server[+200]
+epprd_rg:start_server[+200] cltime
DATE=2023-01-28T18:07:38.496223
+epprd_rg:start_server[+200] echo |2023-01-28T18:07:38.496223|INFO: Starting application controller in background|epprd_app
+epprd_rg:start_server[+200] 1>> /var/hacmp/availability/clavailability.log
+epprd_rg:start_server[+276]
+epprd_rg:start_server[+276] clodmget -q name = epprd_app -n -f cpu_usage_monitor HACMPserver
MACTIVE=no
+epprd_rg:start_server[+276] [[ no == yes ]]
+epprd_rg:start_server[+292]
+epprd_rg:start_server[+292] cut -f2 -d
+epprd_rg:start_server[+292] cat /var/hacmp/log/.start_server.26476888.epprd_app
SUCCESS=0
+epprd_rg:start_server[+292] [[ 0 != +([0-9]) ]]
+epprd_rg:start_server[+297] (( 0 != 0 ))
+epprd_rg:start_server[+303] [[ 0 == 0 ]]
+epprd_rg:start_server[+306] rm -f /var/hacmp/log/.start_server.26476888.epprd_app
+epprd_rg:start_server[+308] cl_RMupdate resource_up All_nonerror_servers start_server
2023-01-28T18:07:38.524696
2023-01-28T18:07:38.529092
+epprd_rg:start_server[+314] exit 0

Jan 28 2023 18:07:38 EVENT COMPLETED: start_server epprd_app 0
|2023-01-28T18:07:38|12511|EVENT COMPLETED: start_server epprd_app 0|
+epprd_rg:process_resources[start_or_stop_applications_for_rg:267] RC=0
+epprd_rg:process_resources[start_or_stop_applications_for_rg:269] (( 0 != 0 && 0 != 11 ))
+epprd_rg:process_resources[start_or_stop_applications_for_rg:279] (( 0 != 0 ))
+epprd_rg:process_resources[start_or_stop_applications_for_rg:291] : Store the result for later accumulation
+epprd_rg:process_resources[start_or_stop_applications_for_rg:293] print 'epprd_rg 0'
+epprd_rg:process_resources[start_or_stop_applications_for_rg:293] 1>> /var/hacmp/log/.process_resources_applications.23790048.epprd_rg
+epprd_rg:process_resources[process_applications:396] : Look at all the status files to see if any were unsuccessful
+epprd_rg:process_resources[process_applications:399] cat /var/hacmp/log/.process_resources_applications.23790048.epprd_rg
+epprd_rg:process_resources[process_applications:399] read skip SUCCESS rest
+epprd_rg:process_resources[process_applications:401] [[ 0 != 0 ]]
+epprd_rg:process_resources[process_applications:411] rm -f /var/hacmp/log/.process_resources_applications.23790048.epprd_rg
+epprd_rg:process_resources[process_applications:416] : Release lpar resources in one-shot now that applications are stopped
+epprd_rg:process_resources[process_applications:418] [[ ACQUIRE == RELEASE ]]
+epprd_rg:process_resources[process_applications:433] [[ 0 != 0 ]]
+epprd_rg:process_resources[process_applications:434] [[ 0 != 0 ]]
+epprd_rg:process_resources[process_applications:435] [[ 0 != 0 ]]
+epprd_rg:process_resources[process_applications:439] return 0
+epprd_rg:process_resources[3550] RC=0
+epprd_rg:process_resources[3551] [[ ACQUIRE == RELEASE ]]
+epprd_rg:process_resources[3324] true
+epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next
+epprd_rg:process_resources[3328] set -a
+epprd_rg:process_resources[3329] clRGPA
+epprd_rg:clRGPA[+47] [[ high = high ]]
+epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$
+epprd_rg:clRGPA[+49] usingVer=clrgpa
+epprd_rg:clRGPA[+54] clrgpa
2023-01-28T18:07:38.615870 clrgpa
+epprd_rg:clRGPA[+55] exit 0
+epprd_rg:process_resources[3329] eval JOB_TYPE=ONLINE RESOURCE_GROUPS='"epprd_rg"'
+epprd_rg:process_resources[1] JOB_TYPE=ONLINE
+epprd_rg:process_resources[1] RESOURCE_GROUPS=epprd_rg
+epprd_rg:process_resources[3330] RC=0
+epprd_rg:process_resources[3331] set +a
+epprd_rg:process_resources[3333] (( 0 != 0 ))
+epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg
+epprd_rg:process_resources[3343] GROUPNAME=epprd_rg
+epprd_rg:process_resources[3343] export GROUPNAME
+epprd_rg:process_resources[3353] IS_SERVICE_START=1
+epprd_rg:process_resources[3354] IS_SERVICE_STOP=1
+epprd_rg:process_resources[3360] [[ ONLINE == RELEASE ]]
+epprd_rg:process_resources[3360] [[ ONLINE == ONLINE ]]
+epprd_rg:process_resources[3363] INFO_STRING=''
+epprd_rg:process_resources[3364] clnodename
+epprd_rg:process_resources[3373] ENV_VAR=GROUP_epprd_rg_epprda
+epprd_rg:process_resources[3374] eval 'echo $GROUP_epprd_rg_epprda'
+epprd_rg:process_resources[1] echo WILLBEUPPOSTEVENT
+epprd_rg:process_resources[3374] read ENV_VAR
+epprd_rg:process_resources[3375] [[ WILLBEUPPOSTEVENT == WILLBEUPPOSTEVENT ]]
+epprd_rg:process_resources[3376] INFO_STRING='|DESTINATION=epprda'
+epprd_rg:process_resources[3377] IS_SERVICE_STOP=0
+epprd_rg:process_resources[3379] [[ WILLBEUPPOSTEVENT == ISUPPREEVENT ]]
+epprd_rg:process_resources[3373] ENV_VAR=GROUP_epprd_rg_epprds
+epprd_rg:process_resources[3374] eval 'echo $GROUP_epprd_rg_epprds'
+epprd_rg:process_resources[1] echo
+epprd_rg:process_resources[3374] read ENV_VAR
+epprd_rg:process_resources[3375] [[ '' == WILLBEUPPOSTEVENT ]]
+epprd_rg:process_resources[3379] [[ '' == ISUPPREEVENT ]]
+epprd_rg:process_resources[3384] (( 1 == 0 && 0 ==0 ))
+epprd_rg:process_resources[3673] set_resource_group_state UP
+epprd_rg:process_resources[set_resource_group_state:82] PS4_FUNC=set_resource_group_state
+epprd_rg:process_resources[set_resource_group_state:82] typeset PS4_FUNC
+epprd_rg:process_resources[set_resource_group_state:83] [[ high == high ]]
+epprd_rg:process_resources[set_resource_group_state:83] set -x
+epprd_rg:process_resources[set_resource_group_state:84] STAT=0 +epprd_rg:process_resources[set_resource_group_state:85] new_status=UP +epprd_rg:process_resources[set_resource_group_state:89] export GROUPNAME +epprd_rg:process_resources[set_resource_group_state:90] [[ UP != DOWN ]] +epprd_rg:process_resources[set_resource_group_state:92] clchdaemons -d clstrmgr_scripts -t resource_locator -n epprda -o epprd_rg -v UP +epprd_rg:process_resources[set_resource_group_state:100] : Resource Manager Updates +epprd_rg:process_resources[set_resource_group_state:116] cl_RMupdate rg_up epprd_rg process_resources 2023-01-28T18:07:38.652694 2023-01-28T18:07:38.657150 +epprd_rg:process_resources[set_resource_group_state:118] amlog_trace '' 'acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:318] clcycle clavailability.log +epprd_rg:process_resources[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:process_resources[amlog_trace:319] cltime +epprd_rg:process_resources[amlog_trace:319] DATE=2023-01-28T18:07:38.686718 +epprd_rg:process_resources[amlog_trace:320] echo '|2023-01-28T18:07:38.686718|INFO: acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:process_resources[set_resource_group_state:153] return 0 +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T18:07:38.698633 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=NONE +epprd_rg:process_resources[1] JOB_TYPE=NONE +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ NONE == RELEASE ]] +epprd_rg:process_resources[3360] [[ NONE == ONLINE ]] +epprd_rg:process_resources[3729] break +epprd_rg:process_resources[3740] : If sddsrv was turned off above, turn it back on again +epprd_rg:process_resources[3742] [[ FALSE == TRUE ]] +epprd_rg:process_resources[3747] exit 0 +epprd_rg:rg_move_complete[455] STATUS=0 +epprd_rg:rg_move_complete[456] : The exit status of process_resources is: 0 +epprd_rg:rg_move_complete[461] unset AM_SYNC_CALLED_BY +epprd_rg:rg_move_complete[462] [[ TRUE == TRUE ]] +epprd_rg:rg_move_complete[491] [[ -z '' ]] +epprd_rg:rg_move_complete[493] RESOURCE_GROUPS=epprd_rg +epprd_rg:rg_move_complete[499] GROUPNAME=epprd_rg +epprd_rg:rg_move_complete[499] export GROUPNAME +epprd_rg:rg_move_complete[501] cl_rrmethods2call postrg_move +epprd_rg:cl_rrmethods2call[56] version=%I% +epprd_rg:cl_rrmethods2call[84] RRMETHODS='' +epprd_rg:cl_rrmethods2call[85] NEED_RR_ENV_VARS=no +epprd_rg:cl_rrmethods2call[124] NEED_RR_ENV_VARS=yes +epprd_rg:cl_rrmethods2call[129] : Set the '*_REP_RESOURCE' variables if needed. 
+epprd_rg:cl_rrmethods2call[131] [[ yes == yes ]] +epprd_rg:cl_rrmethods2call[133] cllsres +epprd_rg:cl_rrmethods2call[133] 2> /dev/null +epprd_rg:cl_rrmethods2call[133] eval APPLICATIONS='"epprd_app"' EXPORT_FILESYSTEM='"/board_org' '/sapmnt/EPP"' FILESYSTEM='""' FORCED_VARYON='"false"' FSCHECK_TOOL='"fsck"' FS_BEFORE_IPADDR='"false"' MOUNT_FILESYSTEM='"/board;/board_org"' RECOVERY_METHOD='"sequential"' SERVICE_LABEL='"epprd"' SSA_DISK_FENCING='"false"' VG_AUTO_IMPORT='"false"' VOLUME_GROUP='"datavg"' USERDEFINED_RESOURCES='""' +epprd_rg:cl_rrmethods2call[1] APPLICATIONS=epprd_app +epprd_rg:cl_rrmethods2call[1] EXPORT_FILESYSTEM='/board_org /sapmnt/EPP' +epprd_rg:cl_rrmethods2call[1] FILESYSTEM='' +epprd_rg:cl_rrmethods2call[1] FORCED_VARYON=false +epprd_rg:cl_rrmethods2call[1] FSCHECK_TOOL=fsck +epprd_rg:cl_rrmethods2call[1] FS_BEFORE_IPADDR=false +epprd_rg:cl_rrmethods2call[1] MOUNT_FILESYSTEM='/board;/board_org' +epprd_rg:cl_rrmethods2call[1] RECOVERY_METHOD=sequential +epprd_rg:cl_rrmethods2call[1] SERVICE_LABEL=epprd +epprd_rg:cl_rrmethods2call[1] SSA_DISK_FENCING=false +epprd_rg:cl_rrmethods2call[1] VG_AUTO_IMPORT=false +epprd_rg:cl_rrmethods2call[1] VOLUME_GROUP=datavg +epprd_rg:cl_rrmethods2call[1] USERDEFINED_RESOURCES='' +epprd_rg:cl_rrmethods2call[137] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[142] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[147] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[152] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[157] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[162] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[167] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[172] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[182] [[ -z '' ]] +epprd_rg:cl_rrmethods2call[184] typeset sysmgdata +epprd_rg:cl_rrmethods2call[185] typeset reposmgdata +epprd_rg:cl_rrmethods2call[186] [[ -x /usr/es/sbin/cluster/xd_generic/xd_cli/clxd_list_mg_smit ]] +epprd_rg:cl_rrmethods2call[191] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[191] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[197] echo '' +epprd_rg:cl_rrmethods2call[199] return 0 +epprd_rg:rg_move_complete[501] METHODS='' +epprd_rg:rg_move_complete[516] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. 
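
The cl_rrmethods2call fragment above also shows how a resource group's definition crosses from the ODM into the shell: cllsres prints name='value' pairs and a single eval turns them into ordinary variables such as APPLICATIONS, VOLUME_GROUP and SERVICE_LABEL. The idiom in isolation, as a sketch rather than the shipped code:

    eval $(cllsres 2>/dev/null)   # defines APPLICATIONS="epprd_app", VOLUME_GROUP="datavg", ...
    echo "group runs ${APPLICATIONS:-?} on ${VOLUME_GROUP:-?} behind service label ${SERVICE_LABEL:-?}"
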
+epprd_rg:rg_move_complete[518] exit 0 Jan 28 2023 18:07:38 EVENT COMPLETED: rg_move_complete epprda 1 0 |2023-01-28T18:07:38|12511|EVENT COMPLETED: rg_move_complete epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:07:38.804594 + echo '|2023-01-28T18:07:38.804594|INFO: rg_move_complete|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log PowerHA SystemMirror Event Summary ---------------------------------------------------------------------------- Serial number for this event: 12511 Event: TE_RG_MOVE_ACQUIRE Start time: Sat Jan 28 18:06:51 2023 End time: Sat Jan 28 18:07:38 2023 Action: Resource: Script Name: ---------------------------------------------------------------------------- Acquiring resource group: epprd_rg process_resources Search on: Sat.Jan.28.18:06:52.KORST.2023.process_resources.epprd_rg.ref Acquiring resource: All_service_addrs acquire_service_addr Search on: Sat.Jan.28.18:06:52.KORST.2023.acquire_service_addr.All_service_addrs.epprd_rg.ref Resource online: All_nonerror_service_addrs acquire_service_addr Search on: Sat.Jan.28.18:06:53.KORST.2023.acquire_service_addr.All_nonerror_service_addrs.epprd_rg.ref Acquiring resource: All_volume_groups cl_activate_vgs Search on: Sat.Jan.28.18:06:53.KORST.2023.cl_activate_vgs.All_volume_groups.epprd_rg.ref Resource online: All_nonerror_volume_groups cl_activate_vgs Search on: Sat.Jan.28.18:06:57.KORST.2023.cl_activate_vgs.All_nonerror_volume_groups.epprd_rg.ref Acquiring resource: All_filesystems cl_activate_fs Search on: Sat.Jan.28.18:06:58.KORST.2023.cl_activate_fs.All_filesystems.epprd_rg.ref Resource online: All_non_error_filesystems cl_activate_fs Search on: Sat.Jan.28.18:07:00.KORST.2023.cl_activate_fs.All_non_error_filesystems.epprd_rg.ref Acquiring resource: All_exports cl_export_fs Search on: Sat.Jan.28.18:07:06.KORST.2023.cl_export_fs.All_exports.epprd_rg.ref Resource online: All_nonerror_exports cl_export_fs Search on: Sat.Jan.28.18:07:06.KORST.2023.cl_export_fs.All_nonerror_exports.epprd_rg.ref Acquiring resource: All_nfs_mounts cl_activate_nfs Search on: Sat.Jan.28.18:07:06.KORST.2023.cl_activate_nfs.All_nfs_mounts.epprd_rg.ref Acquiring resource: All_servers start_server Search on: Sat.Jan.28.18:07:38.KORST.2023.start_server.All_servers.epprd_rg.ref Resource online: All_nonerror_servers start_server Search on: Sat.Jan.28.18:07:38.KORST.2023.start_server.All_nonerror_servers.epprd_rg.ref Resource group online: epprd_rg process_resources Search on: Sat.Jan.28.18:07:38.KORST.2023.process_resources.epprd_rg.ref ---------------------------------------------------------------------------- |EVENT_SUMMARY_START|TE_RG_MOVE_ACQUIRE|2023-01-28T18:06:51|2023-01-28T18:07:38|12511| |EV_SUM_SEARCHON_STR|Sat.Jan.28.18:06:52.KORST.2023.process_resources.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Jan.28.18:06:52.KORST.2023.acquire_service_addr.All_service_addrs.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Jan.28.18:06:53.KORST.2023.acquire_service_addr.All_nonerror_service_addrs.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Jan.28.18:06:53.KORST.2023.cl_activate_vgs.All_volume_groups.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Jan.28.18:06:57.KORST.2023.cl_activate_vgs.All_nonerror_volume_groups.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Jan.28.18:06:58.KORST.2023.cl_activate_fs.All_filesystems.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Jan.28.18:07:00.KORST.2023.cl_activate_fs.All_non_error_filesystems.epprd_rg.ref.ref| 
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:07:06.KORST.2023.cl_export_fs.All_exports.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:07:06.KORST.2023.cl_export_fs.All_nonerror_exports.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:07:06.KORST.2023.cl_activate_nfs.All_nfs_mounts.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:07:38.KORST.2023.start_server.All_servers.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:07:38.KORST.2023.start_server.All_nonerror_servers.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.18:07:38.KORST.2023.process_resources.epprd_rg.ref.ref|
|EVENT_SUMMARY_END|

PowerHA SystemMirror Event Preamble
----------------------------------------------------------------------------
Serial number for this event: 12512

No resource state change initiated by the cluster manager as a result of this event
----------------------------------------------------------------------------
|EVENT_PREAMBLE_START|TE_JOIN_NODE_DEP_COMPLETE|2023-01-28T18:07:40|12512|
|EVENT_NO_ACTIONS_QUEUED|
|EVENT_PREAMBLE_END|

Jan 28 2023 18:07:40 EVENT START: node_up_complete epprda
|2023-01-28T18:07:40|12512|EVENT START: node_up_complete epprda|
+ clcycle clavailability.log
+ 1> /dev/null 2>& 1
+ cltime
+ DATE=2023-01-28T18:07:40.997031
+ echo '|2023-01-28T18:07:40.997031|INFO: node_up_complete|epprda'
+ 1>> /var/hacmp/availability/clavailability.log
+ version=%I%
+ set -a
+ cllsparam -n epprda
+ eval NODE_NAME=epprda VERBOSE_LOGGING=high PS4=$'\'${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO]' $'\'' DEBUG_LEVEL=Standard LC_ALL=$'\'C\''
+ NODE_NAME=epprda
+ VERBOSE_LOGGING=high
:node_up_complete[1] PS4='${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] '
:node_up_complete[1] DEBUG_LEVEL=Standard
:node_up_complete[1] LC_ALL=C
:node_up_complete[80] set +a
:node_up_complete[82] NODENAME=epprda
:node_up_complete[83] RC=0
:node_up_complete[83] typeset -i RC
:node_up_complete[84] UPDATESTATD=0
:node_up_complete[84] typeset -i UPDATESTATD
:node_up_complete[86] LPM_IN_PROGRESS_DIR=/var/hacmp/.lpm_in_progress
:node_up_complete[86] typeset LPM_IN_PROGRESS_DIR
:node_up_complete[87] LPM_IN_PROGRESS_PREFIX=lpm
:node_up_complete[87] typeset LPM_IN_PROGRESS_PREFIX
:node_up_complete[88] STATE_FILE=/var/hacmp/cl_dr.state
:node_up_complete[88] typeset STATE_FILE
:node_up_complete[97] STATUS=0
:node_up_complete[99] set -u
:node_up_complete[101] (( 1 < 1 ))
:node_up_complete[107] START_MODE=''
:node_up_complete[107] typeset START_MODE
:node_up_complete[108] (( 1 > 1 ))
:node_up_complete[114] : serial number for this event is 12512
:node_up_complete[118] RPCLOCKDSTOPPED=0
:node_up_complete[118] typeset -i RPCLOCKDSTOPPED
:node_up_complete[119] [[ -f /tmp/.RPCLOCKDSTOPPED ]]
:node_up_complete[127] clnodename
:node_up_complete[127] wc -l
:node_up_complete[127] (( 2 == 2 ))
:node_up_complete[129] clodmget -f group -n HACMPgroup
:node_up_complete[129] RESOURCE_GROUPS=epprd_rg
:node_up_complete[132] clodmget -q group='epprd_rg AND name=EXPORT_FILESYSTEM' -f value -n HACMPresource
:node_up_complete[132] EXPORTLIST=$'/board_org\n/sapmnt/EPP'
:node_up_complete[133] [[ -n $'/board_org\n/sapmnt/EPP' ]]
:node_up_complete[135] UPDATESTATD=1
:node_up_complete[136] [[ epprda == epprda ]]
:node_up_complete[139] lssrc -s rpc.statd
:node_up_complete[139] LC_ALL=C
:node_up_complete[139] grep inoperative
:node_up_complete[140] (( 1 == 0 ))
:node_up_complete[146] cl_update_statd
:cl_update_statd(0)[+174] version=%I%
:cl_update_statd(0)[+176] typeset -i RC=0
:cl_update_statd(0)[+178] LOCAL_FOUND=
:cl_update_statd(0)[+179] TWIN_NAME=
:cl_update_statd(0)[+180] [[ -z epprda ]]
:cl_update_statd(0)[+181]
:cl_update_statd(0)[+181] cl_get_path -S
OP_SEP=~
:cl_update_statd(0)[+182] set -u
:cl_update_statd(0)[+187] LOCAL_FOUND=true
:cl_update_statd(0)[+194] : Make sure statd is running locally
:cl_update_statd(0)[+196] lssrc -s statd
:cl_update_statd(0)[+196] LC_ALL=C
:cl_update_statd(0)[+196] grep -qw inoperative
:cl_update_statd(0)[+196] rpcinfo -p
:cl_update_statd(0)[+196] LC_ALL=C
:cl_update_statd(0)[+196] grep -qw status
:cl_update_statd(0)[+207] : Get the current twin, if there is one
:cl_update_statd(0)[+209]
:cl_update_statd(0)[+209] nfso -H sm_gethost
:cl_update_statd(0)[+209] 2>& 1
CURTWIN=
:cl_update_statd(0)[+210] RC=0
:cl_update_statd(0)[+212] [[ -z true ]]
:cl_update_statd(0)[+212] [[ -z ]]
:cl_update_statd(0)[+215] : Local node is no longer a cluster member, unregister its twin
:cl_update_statd(0)[+215] [[ -n ]]
:cl_update_statd(0)[+259] : RC is actually 0
:cl_update_statd(0)[+266] return 0
:node_up_complete[147] (( 0 ))
:node_up_complete[151] break
:node_up_complete[156] (( 1 ))
:node_up_complete[158] (( 0 ))
:node_up_complete[198] [[ TRUE == FALSE ]]
:node_up_complete[268] refresh -s clcomd
0513-095 The request for subsystem refresh was completed successfully.
:node_up_complete[270] : This is the final clRGinfo output
:node_up_complete[272] clRGinfo -p -t
:node_up_complete[272] 2>& 1
clRGinfo[431]: version I
clRGinfo[517]: Number of resource groups = 0
clRGinfo[562]: cluster epprda_cluster is version = 22
clRGinfo[597]: no resource groups specified on command line - print all
clRGinfo[685]: Current group is 'epprd_rg'
get primary state info for state 6
get secondary state info for state 6
getPrimaryStateStr: using primary_table => primary_state_table
get primary state info for state 4
get secondary state info for state 4
getPrimaryStateStr: using primary_table => primary_state_table

Cluster Name: epprda_cluster

Resource Group Name: epprd_rg
Node                             Group State       Delayed Timers
---------------------------------------------------------------- --------------- -------------------
epprda                           ONLINE
epprds                           OFFLINE

:node_up_complete[277] (( 0 == 0 ))
:node_up_complete[279] [[ epprda != epprda ]]
:node_up_complete[300] exit 0

Jan 28 2023 18:07:41 EVENT COMPLETED: node_up_complete epprda 0
|2023-01-28T18:07:41|12512|EVENT COMPLETED: node_up_complete epprda 0|
+ clcycle clavailability.log
+ 1> /dev/null 2>& 1
+ cltime
+ DATE=2023-01-28T18:07:41.198485
+ echo '|2023-01-28T18:07:41.198485|INFO: node_up_complete|epprda|0'
+ 1>> /var/hacmp/availability/clavailability.log

PowerHA SystemMirror Event Preamble
----------------------------------------------------------------------------
Serial number for this event: 8589

Cluster services started on node 'epprds'
Node Up Completion Event has been enqueued.
---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_JOIN_NODE_DEP|2023-01-28T18:07:46|8589| |NODE_UP_COMPLETE| |EVENT_PREAMBLE_END| Jan 28 2023 18:07:49 EVENT START: node_up epprds |2023-01-28T18:07:49|8589|EVENT START: node_up epprds| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:07:49.134447 + echo '|2023-01-28T18:07:49.134447|INFO: node_up|epprds' + 1>> /var/hacmp/availability/clavailability.log :node_up[182] version=%I% :node_up[185] NODENAME=epprds :node_up[185] export NODENAME :node_up[193] STATUS=0 :node_up[193] typeset -li STATUS :node_up[194] RC=0 :node_up[194] typeset -li RC :node_up[195] ENABLE_NFS_CROSS_MOUNT=false :node_up[196] START_MODE='' :node_up[196] typeset START_MODE :node_up[198] set -u :node_up[200] (( 1 < 1 )) :node_up[200] (( 1 > 2 )) :node_up[207] : serial number for this event is 8589 :node_up[210] [[ epprda == epprds ]] :node_up[219] (( 1 > 1 )) :node_up[256] : If RG_DEPENDENCIES=false, process RGs with clsetenvgrp :node_up[258] [[ TRUE == FALSE ]] :node_up[281] : localnode processing prior to RG acquisition :node_up[283] [[ epprda == epprds ]] :node_up[498] : Enable NFS crossmounts during manual start :node_up[500] [[ -n false ]] :node_up[500] [[ false == true ]] :node_up[607] : When RG dependencies are not configured we call node_up_local/remote, :node_up[608] : followed by process_resources to process any remaining groups :node_up[610] [[ TRUE == FALSE ]] :node_up[657] [[ epprda == epprds ]] :node_up[667] return 0 Jan 28 2023 18:07:49 EVENT COMPLETED: node_up epprds 0 |2023-01-28T18:07:49|8589|EVENT COMPLETED: node_up epprds 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:07:49.257759 + echo '|2023-01-28T18:07:49.257759|INFO: node_up|epprds|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 18:07:52 EVENT START: rg_move_fence epprds 1 |2023-01-28T18:07:52|8590|EVENT START: rg_move_fence epprds 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:07:52.678481 + echo '|2023-01-28T18:07:52.678481|INFO: rg_move_fence|epprd_rg|epprds|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_fence[62] [[ high == high ]] :rg_move_fence[62] version=1.11 :rg_move_fence[63] NODENAME=epprds :rg_move_fence[63] export NODENAME :rg_move_fence[65] set -u :rg_move_fence[67] [ 2 != 2 ] :rg_move_fence[73] set +u :rg_move_fence[75] [[ -z TRUE ]] :rg_move_fence[80] [[ TRUE == TRUE ]] :rg_move_fence[82] LOCAL_NODENAME=epprda :rg_move_fence[83] odmget -qid=1 HACMPgroup :rg_move_fence[83] egrep 'group =' :rg_move_fence[83] awk '{print $3}' :rg_move_fence[83] eval RGNAME='"epprd_rg"' :rg_move_fence[1] RGNAME=epprd_rg +epprd_rg:rg_move_fence[84] GROUPNAME=epprd_rg +epprd_rg:rg_move_fence[85] group_state='$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[86] set +u +epprd_rg:rg_move_fence[87] eval print '$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[1] print ONLINE +epprd_rg:rg_move_fence[87] RG_MOVE_ONLINE=ONLINE +epprd_rg:rg_move_fence[87] export RG_MOVE_ONLINE +epprd_rg:rg_move_fence[88] set -u +epprd_rg:rg_move_fence[89] RG_MOVE_ONLINE=ONLINE +epprd_rg:rg_move_fence[91] set -a +epprd_rg:rg_move_fence[92] clsetenvgrp epprda rg_move epprd_rg '' :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp 
completed successfully
:clsetenvgrp[+57] exit 0
+epprd_rg:rg_move_fence[92] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n '
+epprd_rg:rg_move_fence[93] RC=0
+epprd_rg:rg_move_fence[94] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n '
+epprd_rg:rg_move_fence[1] FORCEDOWN_GROUPS=''
+epprd_rg:rg_move_fence[2] RESOURCE_GROUPS=''
+epprd_rg:rg_move_fence[3] HOMELESS_GROUPS=''
+epprd_rg:rg_move_fence[4] HOMELESS_FOLLOWER_GROUPS=''
+epprd_rg:rg_move_fence[5] ERRSTATE_GROUPS=''
+epprd_rg:rg_move_fence[6] PRINCIPAL_ACTIONS=''
+epprd_rg:rg_move_fence[7] ASSOCIATE_ACTIONS=''
+epprd_rg:rg_move_fence[8] AUXILLIARY_ACTIONS=''
+epprd_rg:rg_move_fence[8] SIBLING_GROUPS=''
+epprd_rg:rg_move_fence[9] SIBLING_NODES_BY_GROUP=''
+epprd_rg:rg_move_fence[10] SIBLING_ACQUIRING_GROUPS=''
+epprd_rg:rg_move_fence[11] SIBLING_ACQUIRING_NODES_BY_GROUP=''
+epprd_rg:rg_move_fence[12] SIBLING_RELEASING_GROUPS=''
+epprd_rg:rg_move_fence[13] SIBLING_RELEASING_NODES_BY_GROUP=''
+epprd_rg:rg_move_fence[95] set +a
+epprd_rg:rg_move_fence[96] [ 0 -ne 0 ]
+epprd_rg:rg_move_fence[103] process_resources FENCE
:rg_move_fence[3318] version=1.169
:rg_move_fence[3321] STATUS=0
:rg_move_fence[3322] sddsrv_off=FALSE
:rg_move_fence[3324] true
:rg_move_fence[3326] : call rgpa, and it will tell us what to do next
:rg_move_fence[3328] set -a
:rg_move_fence[3329] clRGPA FENCE
:clRGPA[+47] [[ high = high ]]
:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$
:clRGPA[+49] usingVer=clrgpa
:clRGPA[+54] clrgpa FENCE
2023-01-28T18:07:52.782235 clrgpa
:clRGPA[+55] exit 0
:rg_move_fence[3329] eval JOB_TYPE=NONE
:rg_move_fence[1] JOB_TYPE=NONE
:rg_move_fence[3330] RC=0
:rg_move_fence[3331] set +a
:rg_move_fence[3333] (( 0 != 0 ))
:rg_move_fence[3342] RESOURCE_GROUPS=''
:rg_move_fence[3343] GROUPNAME=''
:rg_move_fence[3343] export GROUPNAME
:rg_move_fence[3353] IS_SERVICE_START=1
:rg_move_fence[3354] IS_SERVICE_STOP=1
:rg_move_fence[3360] [[ NONE == RELEASE ]]
:rg_move_fence[3360] [[ NONE == ONLINE ]]
:rg_move_fence[3729] break
:rg_move_fence[3740] : If sddsrv was turned off above, turn it back on again
:rg_move_fence[3742] [[ FALSE == TRUE ]]
:rg_move_fence[3747] exit 0
+epprd_rg:rg_move_fence[104] : exit status of process_resources FENCE is: 0
+epprd_rg:rg_move_fence[107] [[ TRUE == TRUE ]]
+epprd_rg:rg_move_fence[109] export EVENT_TYPE
+epprd_rg:rg_move_fence[110] echo ACQUIRE_PRIMARY_NFS
ACQUIRE_PRIMARY_NFS
+epprd_rg:rg_move_fence[111] [[ -n '' ]]
+epprd_rg:rg_move_fence[141] exit 0

Jan 28 2023 18:07:52 EVENT COMPLETED: rg_move_fence epprds 1 0
|2023-01-28T18:07:52|8590|EVENT COMPLETED: rg_move_fence epprds 1 0|
+ clcycle clavailability.log
+ 1> /dev/null 2>& 1
+ cltime
+ DATE=2023-01-28T18:07:52.874344
+ echo '|2023-01-28T18:07:52.874344|INFO: rg_move_fence|epprd_rg|epprds|1|0'
+ 1>> /var/hacmp/availability/clavailability.log

Jan 28 2023 18:07:53 EVENT START: rg_move_acquire epprds 1
|2023-01-28T18:07:53|8590|EVENT START: rg_move_acquire epprds 1|
+ clcycle clavailability.log
+ 1> /dev/null 2>& 1
+ cltime
+ DATE=2023-01-28T18:07:53.075394
+ echo '|2023-01-28T18:07:53.075394|INFO: rg_move_acquire|epprd_rg|epprds|1'
+ 1>> /var/hacmp/availability/clavailability.log
:rg_move_acquire[+54] [[ high == high ]]
:rg_move_acquire[+54] version=1.9.1.7
:rg_move_acquire[+57] set -u
:rg_move_acquire[+59] [ 2 != 2 ]
:rg_move_acquire[+65] set +u
:rg_move_acquire[+67]
:rg_move_acquire[+67] clodmget -n -q id=1 -f group HACMPgroup
RG=epprd_rg
:rg_move_acquire[+68] export RG
:rg_move_acquire[+70] [[ ACQUIRE_PRIMARY_NFS == ACQUIRE_PRIMARY ]]
:rg_move_acquire[+118] clcallev rg_move epprds 1 ACQUIRE

Jan 28 2023 18:07:53 EVENT START: rg_move epprds 1 ACQUIRE
|2023-01-28T18:07:53|8590|EVENT START: rg_move epprds 1 ACQUIRE|
:clevlog[amlog_trace:318] clcycle clavailability.log
:clevlog[amlog_trace:318] 1> /dev/null 2>& 1
:clevlog[amlog_trace:319] cltime
:clevlog[amlog_trace:319] DATE=2023-01-28T18:07:53.202571
:clevlog[amlog_trace:320] echo '|2023-01-28T18:07:53.202571|INFO: rg_move|epprd_rg|epprds|1|ACQUIRE'
:clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
:get_local_nodename[48] version=1.2.1.28
:get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster
:get_local_nodename[54] ODMDIR=/etc/es/objrepos
:get_local_nodename[54] export ODMDIR
:get_local_nodename[55] nodename=''
:get_local_nodename[55] typeset nodename
:get_local_nodename[56] cllsclstr -N
:get_local_nodename[56] nodename=epprda
:get_local_nodename[57] rc=0
:get_local_nodename[57] typeset -i rc
:get_local_nodename[58] (( 0 != 0 ))
:get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done.
:get_local_nodename[63] clnodename
:get_local_nodename[63] grep -w epprda
:get_local_nodename[63] [[ -n epprda ]]
:get_local_nodename[65] print -- epprda
:get_local_nodename[66] exit 0
:rg_move[76] version=%I%
:rg_move[86] STATUS=0
:rg_move[88] [[ ! -n '' ]]
:rg_move[90] EMULATE=REAL
:rg_move[96] set -u
:rg_move[98] NODENAME=epprds
:rg_move[98] export NODENAME
:rg_move[99] RGID=1
:rg_move[100] (( 3 == 3 ))
:rg_move[102] ACTION=ACQUIRE
:rg_move[108] : serial number for this event is 8590
:rg_move[112] RG_UP_POSTEVENT_ON_NODE=epprds
:rg_move[112] export RG_UP_POSTEVENT_ON_NODE
:rg_move[116] clodmget -qid=1 -f group -n HACMPgroup
:rg_move[116] eval RGNAME=epprd_rg
:rg_move[1] RGNAME=epprd_rg
:rg_move[118] UPDATESTATD=0
:rg_move[119] export UPDATESTATD
:rg_move[123] RG_MOVE_EVENT=true
:rg_move[123] export RG_MOVE_EVENT
:rg_move[128] group_state='$RESGRP_epprd_rg_epprda'
:rg_move[129] set +u
:rg_move[130] eval print '$RESGRP_epprd_rg_epprda'
:rg_move[1] print ONLINE
:rg_move[130] RG_MOVE_ONLINE=ONLINE
:rg_move[130] export RG_MOVE_ONLINE
:rg_move[131] set -u
:rg_move[132] RG_MOVE_ONLINE=ONLINE
:rg_move[139] rm -f /tmp/.NFSSTOPPED
:rg_move[140] rm -f /tmp/.RPCLOCKDSTOPPED
:rg_move[147] set -a
:rg_move[148] clsetenvgrp epprds rg_move epprd_rg
:clsetenvgrp[+49] [[ high = high ]]
:clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$
:clsetenvgrp[+51] usingVer=clSetenvgrp
:clsetenvgrp[+56] clSetenvgrp epprds rg_move epprd_rg
executing clSetenvgrp
clSetenvgrp completed successfully
:clsetenvgrp[+57] exit 0
:rg_move[148] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n '
:rg_move[149] RC=0
:rg_move[150] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n '
:rg_move[1] FORCEDOWN_GROUPS=''
:rg_move[2] RESOURCE_GROUPS=''
:rg_move[3] HOMELESS_GROUPS=''
:rg_move[4] HOMELESS_FOLLOWER_GROUPS=''
:rg_move[5] ERRSTATE_GROUPS=''
:rg_move[6] PRINCIPAL_ACTIONS=''
:rg_move[7] ASSOCIATE_ACTIONS=''
:rg_move[8] AUXILLIARY_ACTIONS=''
:rg_move[8] SIBLING_GROUPS=''
:rg_move[9] SIBLING_NODES_BY_GROUP=''
:rg_move[10] SIBLING_ACQUIRING_GROUPS=''
:rg_move[11] SIBLING_ACQUIRING_NODES_BY_GROUP=''
:rg_move[12] SIBLING_RELEASING_GROUPS=''
:rg_move[13] SIBLING_RELEASING_NODES_BY_GROUP=''
:rg_move[151] set +a
:rg_move[155] (( 0 != 0 ))
:rg_move[155] [[ -z epprd_rg ]]
:rg_move[164] [[ -z TRUE ]]
:rg_move[241] AM_SYNC_CALLED_BY=RG_MOVE
:rg_move[241] export AM_SYNC_CALLED_BY
:rg_move[242] process_resources
:process_resources[3318] version=1.169
:process_resources[3321] STATUS=0
:process_resources[3322] sddsrv_off=FALSE
:process_resources[3324] true
:process_resources[3326] : call rgpa, and it will tell us what to do next
:process_resources[3328] set -a
:process_resources[3329] clRGPA
:clRGPA[+47] [[ high = high ]]
:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$
:clRGPA[+49] usingVer=clrgpa
:clRGPA[+54] clrgpa
2023-01-28T18:07:53.323399 clrgpa
:clRGPA[+55] exit 0
:process_resources[3329] eval JOB_TYPE=NONE
:process_resources[1] JOB_TYPE=NONE
:process_resources[3330] RC=0
:process_resources[3331] set +a
:process_resources[3333] (( 0 != 0 ))
:process_resources[3342] RESOURCE_GROUPS=''
:process_resources[3343] GROUPNAME=''
:process_resources[3343] export GROUPNAME
:process_resources[3353] IS_SERVICE_START=1
:process_resources[3354] IS_SERVICE_STOP=1
:process_resources[3360] [[ NONE == RELEASE ]]
:process_resources[3360] [[ NONE == ONLINE ]]
:process_resources[3729] break
:process_resources[3740] : If sddsrv was turned off above, turn it back on again
:process_resources[3742] [[ FALSE == TRUE ]]
:process_resources[3747] exit 0
:rg_move[247] : unsetting AM_SYNC_CALLED_BY from $'callers environment as\n: we dont' require it after this point in execution.
:rg_move[250] unset AM_SYNC_CALLED_BY
:rg_move[253] [[ -f /tmp/.NFSSTOPPED ]]
:rg_move[274] [[ -f /tmp/.RPCLOCKDSTOPPED ]]
:rg_move[293] exit 0

Jan 28 2023 18:07:53 EVENT COMPLETED: rg_move epprds 1 ACQUIRE 0
|2023-01-28T18:07:53|8590|EVENT COMPLETED: rg_move epprds 1 ACQUIRE 0|
:clevlog[amlog_trace:318] clcycle clavailability.log
:clevlog[amlog_trace:318] 1> /dev/null 2>& 1
:clevlog[amlog_trace:319] cltime
:clevlog[amlog_trace:319] DATE=2023-01-28T18:07:53.452875
:clevlog[amlog_trace:320] echo '|2023-01-28T18:07:53.452875|INFO: rg_move|epprd_rg|epprds|1|ACQUIRE|0'
:clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
:rg_move_acquire[+119] exit_status=0
:rg_move_acquire[+120] : exit status of clcallev rg_move epprds 1 ACQUIRE is: 0
:rg_move_acquire[+121] exit 0

Jan 28 2023 18:07:53 EVENT COMPLETED: rg_move_acquire epprds 1 0
|2023-01-28T18:07:53|8590|EVENT COMPLETED: rg_move_acquire epprds 1 0|
+ clcycle clavailability.log
+ 1> /dev/null 2>& 1
+ cltime
+ DATE=2023-01-28T18:07:53.562277
+ echo '|2023-01-28T18:07:53.562277|INFO: rg_move_acquire|epprd_rg|epprds|1|0'
+ 1>> /var/hacmp/availability/clavailability.log

Jan 28 2023 18:07:53 EVENT START: rg_move_complete epprds 1
|2023-01-28T18:07:53|8590|EVENT START: rg_move_complete epprds 1|
+ clcycle clavailability.log
+ 1> /dev/null 2>& 1
+ cltime
+ DATE=2023-01-28T18:07:53.761075
+ echo '|2023-01-28T18:07:53.761075|INFO: rg_move_complete|epprd_rg|epprds|1'
+ 1>> /var/hacmp/availability/clavailability.log
:get_local_nodename[48] version=1.2.1.28
:get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster
:get_local_nodename[54] ODMDIR=/etc/es/objrepos
:get_local_nodename[54] export ODMDIR
:get_local_nodename[55] nodename=''
:get_local_nodename[55] typeset nodename
:get_local_nodename[56] cllsclstr -N
:get_local_nodename[56] nodename=epprda
:get_local_nodename[57] rc=0
:get_local_nodename[57] typeset -i rc
:get_local_nodename[58] (( 0 != 0 ))
:get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done.
:get_local_nodename[63] grep -w epprda
:get_local_nodename[63] clnodename
:get_local_nodename[63] [[ -n epprda ]]
:get_local_nodename[65] print -- epprda
:get_local_nodename[66] exit 0
:rg_move_complete[91] version=%I%
:rg_move_complete[97] STATUS=0
:rg_move_complete[97] typeset -li STATUS
:rg_move_complete[99] [[ -z '' ]]
:rg_move_complete[101] EMULATE=REAL
:rg_move_complete[104] set -u
:rg_move_complete[106] (( 2 < 2 || 2 > 3 ))
:rg_move_complete[112] NODENAME=epprds
:rg_move_complete[112] export NODENAME
:rg_move_complete[113] RGID=1
:rg_move_complete[114] (( 2 == 3 ))
:rg_move_complete[118] RGDESTINATION=''
:rg_move_complete[122] : serial number for this event is 8590
:rg_move_complete[126] : Interpret resource group ID into a resource group name.
:rg_move_complete[128] clodmget -qid=1 -f group -n HACMPgroup :rg_move_complete[128] eval RGNAME=epprd_rg :rg_move_complete[1] RGNAME=epprd_rg +epprd_rg:rg_move_complete[129] GROUPNAME=epprd_rg +epprd_rg:rg_move_complete[131] UPDATESTATD=0 +epprd_rg:rg_move_complete[131] typeset -li UPDATESTATD +epprd_rg:rg_move_complete[132] NFSSTOPPED=0 +epprd_rg:rg_move_complete[132] typeset -li NFSSTOPPED +epprd_rg:rg_move_complete[133] LIMIT=60 +epprd_rg:rg_move_complete[133] WAIT=1 +epprd_rg:rg_move_complete[133] TRY=0 +epprd_rg:rg_move_complete[133] typeset -li LIMIT WAIT TRY +epprd_rg:rg_move_complete[136] : If this is a two node cluster and exported filesystems exist, then +epprd_rg:rg_move_complete[137] : when the cluster topology is stable notify rpc.statd of the changes. +epprd_rg:rg_move_complete[139] wc -l +epprd_rg:rg_move_complete[139] clnodename +epprd_rg:rg_move_complete[139] (( 2 == 2 )) +epprd_rg:rg_move_complete[141] clodmget -f group -n HACMPgroup +epprd_rg:rg_move_complete[141] RESOURCE_GROUPS=epprd_rg +epprd_rg:rg_move_complete[144] clodmget -q group='epprd_rg AND name=EXPORT_FILESYSTEM' -f value -n HACMPresource +epprd_rg:rg_move_complete[144] EXPORTLIST=$'/board_org\n/sapmnt/EPP' +epprd_rg:rg_move_complete[146] [[ -n $'/board_org\n/sapmnt/EPP' ]] +epprd_rg:rg_move_complete[146] [[ epprd_rg == epprd_rg ]] +epprd_rg:rg_move_complete[148] UPDATESTATD=1 +epprd_rg:rg_move_complete[149] [[ REAL == EMUL ]] +epprd_rg:rg_move_complete[154] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+189] TWIN_NAME=epprds :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw status :cl_update_statd(0)[+196] rpcinfo -p :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+207] : Get the current twin, if there is one :cl_update_statd(0)[+209] :cl_update_statd(0)[+209] nfso -H sm_gethost :cl_update_statd(0)[+209] 2>& 1 CURTWIN= :cl_update_statd(0)[+210] RC=0 :cl_update_statd(0)[+212] [[ -z true ]] :cl_update_statd(0)[+212] [[ -z epprds ]] :cl_update_statd(0)[+225] : Get the interface to the twin node :cl_update_statd(0)[+227] :cl_update_statd(0)[+227] get_node_ip epprds :cl_update_statd(0)[+9] (( 1 != 1 )) :cl_update_statd(0)[+15] Twin_Name=epprds :cl_update_statd(0)[+16] NewTwin= :cl_update_statd(0)[+19] : Get the Interface details for every interface on the twin node :cl_update_statd(0)[+20] : Reject interfaces on nodes that are not public boot addresses :cl_update_statd(0)[+21] : because those are the only ones we have state information for :cl_update_statd(0)[+23] :cl_update_statd(0)[+23] cllsif -J ~ -Sw -i epprda :cl_update_statd(0)[+23] LC_ALL=C LOCAL_NETWORK_INFO=epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+25] read adapt type network net_type attrib node ip_addr skip interface skip netmask skip skip prefix ip_family :cl_update_statd(0)[+25] IFS=~ :cl_update_statd(0)[+24] cllsif -J ~ -Sw -i epprds :cl_update_statd(0)[+24] LC_ALL=C 
:cl_update_statd(0)[+25] [[ public != public ]] :cl_update_statd(0)[+25] [[ boot != boot ]] :cl_update_statd(0)[+33] : Find the state of this candidate :cl_update_statd(0)[+33] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+37] :cl_update_statd(0)[+37] tr ./ xx :cl_update_statd(0)[+37] print 61.81.244.123 addr=i61x81x244x123_epprds :cl_update_statd(0)[+43] eval candidate_state=${i61x81x244x123_epprds:-down} :cl_update_statd(0)[+43] candidate_state=UP :cl_update_statd(0)[+46] : If state is UP, check to see if this node can talk to it :cl_update_statd(0)[+46] [[ UP == UP ]] :cl_update_statd(0)[+50] ping -w 5 -c 1 -q 61.81.244.123 :cl_update_statd(0)[+50] 1> /dev/null :cl_update_statd(0)[+61] echo epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+62] read lcl_adapt lcl_type lcl_network lcl_net_type lcl_attrib lcl_node lcl_ip_addr skip lcl_interface skip lcl_netmask skip skip lcl_prefix lcl_ip_family :cl_update_statd(0)[+62] IFS=~ :cl_update_statd(0)[+61] tr \n :cl_update_statd(0)[+62] [[ net_ether_01 != net_ether_01 ]] :cl_update_statd(0)[+62] [[ boot != boot ]] :cl_update_statd(0)[+62] [[ public != public ]] :cl_update_statd(0)[+62] [[ AF_INET != AF_INET ]] :cl_update_statd(0)[+62] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+71] :cl_update_statd(0)[+71] tr ./ xx :cl_update_statd(0)[+71] print 61.81.244.134 addr=i61x81x244x134_epprda :cl_update_statd(0)[+77] eval lcl_candidate_state=${i61x81x244x134_epprda:-down} :cl_update_statd(0)[+77] lcl_candidate_state=UP :cl_update_statd(0)[+77] [[ UP == UP ]] :cl_update_statd(0)[+81] : epprds is on the same network as an interface that is up :cl_update_statd(0)[+82] : on the local node, and the attributes match. :cl_update_statd(0)[+84] NewTwin=epprds :cl_update_statd(0)[+85] break :cl_update_statd(0)[+85] [[ -n epprds ]] :cl_update_statd(0)[+91] break :cl_update_statd(0)[+91] [[ -z epprds ]] :cl_update_statd(0)[+100] echo epprds :cl_update_statd(0)[+101] return 0 NEWTWIN=epprds :cl_update_statd(0)[+227] [[ -z epprds ]] :cl_update_statd(0)[+227] [[ epprds != ]] :cl_update_statd(0)[+243] : Need to register a new twin :cl_update_statd(0)[+243] [[ -n ]] :cl_update_statd(0)[+251] : Register our new twin, epprds :cl_update_statd(0)[+253] nfso -H sm_register epprds :cl_update_statd(0)[+254] RC=0 :cl_update_statd(0)[+259] : RC is actually 0 :cl_update_statd(0)[+266] return 0 +epprd_rg:rg_move_complete[155] (( 0 != 0 )) +epprd_rg:rg_move_complete[160] break +epprd_rg:rg_move_complete[166] : Set the RESOURCE_GROUPS environment variable with the names +epprd_rg:rg_move_complete[167] : of all resource groups participating in this event, and export +epprd_rg:rg_move_complete[168] : them to all successive scripts. 
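
The candidate check traced above turns each dotted address into a shell variable name (dots become 'x'), reads the cluster manager's exported state for it with a default of down, and confirms reachability with a single ping. Sketch, assuming the exported state variables follow that naming:

    addr=i$(print -- 61.81.244.123 | tr ./ xx)_epprds    # -> i61x81x244x123_epprds
    eval candidate_state=\${$addr:-down}                 # 'down' when no state is exported
    if [[ $candidate_state == UP ]] && ping -w 5 -c 1 -q 61.81.244.123 > /dev/null
    then
        NewTwin=epprds
    fi
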
+epprd_rg:rg_move_complete[170] set -a +epprd_rg:rg_move_complete[171] clsetenvgrp epprds rg_move_complete epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprds rg_move_complete epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_complete[171] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_complete[172] RC=0 +epprd_rg:rg_move_complete[173] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_complete[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_complete[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_complete[3] HOMELESS_GROUPS='' +epprd_rg:rg_move_complete[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_complete[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_complete[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_complete[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_complete[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_complete[8] SIBLING_GROUPS='' +epprd_rg:rg_move_complete[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_complete[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_complete[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[174] set +a +epprd_rg:rg_move_complete[175] (( 0 != 0 )) +epprd_rg:rg_move_complete[182] : For each participating resource group, serially process the resources. +epprd_rg:rg_move_complete[251] (( 1 == 1 )) +epprd_rg:rg_move_complete[253] [[ REAL == EMUL ]] +epprd_rg:rg_move_complete[259] stopsrc -s rpc.lockd 0513-044 The rpc.lockd Subsystem was requested to stop. +epprd_rg:rg_move_complete[260] rcstopsrc=0 +epprd_rg:rg_move_complete[261] (( 0 != 0 )) +epprd_rg:rg_move_complete[266] (( TRY=0)) +epprd_rg:rg_move_complete[266] (( 0<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 1<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! 
-z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 2<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 3<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 4<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z '' ]] +epprd_rg:rg_move_complete[273] break +epprd_rg:rg_move_complete[277] [[ ! -z '' ]] +epprd_rg:rg_move_complete[300] : Sure that rpc.lockd stopped. Restart it. +epprd_rg:rg_move_complete[302] startsrc -s rpc.lockd 0513-059 The rpc.lockd Subsystem has been started. Subsystem PID is 23790004. +epprd_rg:rg_move_complete[303] rcstartsrc=0 +epprd_rg:rg_move_complete[304] (( 0 != 0 )) +epprd_rg:rg_move_complete[365] : If the resource group in this rg_move is now homeless, +epprd_rg:rg_move_complete[366] : then we need to put it into an error state. +epprd_rg:rg_move_complete[368] active_node=0 +epprd_rg:rg_move_complete[428] : If the resource group in this rg_move is now homeless_secondary, +epprd_rg:rg_move_complete[429] : then we need to put it into an errorsecondary state. +epprd_rg:rg_move_complete[437] : Set an error state for concurrent groups that have +epprd_rg:rg_move_complete[438] : been brought offline on this node by rg_move. 
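
The retry loop traced above bounds the wait for rpc.lockd to leave its 'stopping' state before the daemon is restarted; the trace shows the same C-style for loop decomposed into its arithmetic steps. A minimal equivalent:

    typeset -i LIMIT=60 TRY
    stopsrc -s rpc.lockd
    for (( TRY=0; TRY<LIMIT; TRY++ ))
    do
        LC_ALL=C lssrc -s rpc.lockd | tail -1 | read name subsystem pid state
        [[ -z $state ]] && break        # no state column left: fully stopped
        sleep 1
    done
    startsrc -s rpc.lockd               # restart now that it is down
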
+epprd_rg:rg_move_complete[453] AM_SYNC_CALLED_BY=RG_MOVE_COMPLETE +epprd_rg:rg_move_complete[453] export AM_SYNC_CALLED_BY +epprd_rg:rg_move_complete[454] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-01-28T18:07:57.973934 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=NONE :process_resources[1] JOB_TYPE=NONE :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 )) :process_resources[3342] RESOURCE_GROUPS='' :process_resources[3343] GROUPNAME='' :process_resources[3343] export GROUPNAME :process_resources[3353] IS_SERVICE_START=1 :process_resources[3354] IS_SERVICE_STOP=1 :process_resources[3360] [[ NONE == RELEASE ]] :process_resources[3360] [[ NONE == ONLINE ]] :process_resources[3729] break :process_resources[3740] : If sddsrv was turned off above, turn it back on again :process_resources[3742] [[ FALSE == TRUE ]] :process_resources[3747] exit 0 +epprd_rg:rg_move_complete[455] STATUS=0 +epprd_rg:rg_move_complete[456] : The exit status of process_resources is: 0 +epprd_rg:rg_move_complete[461] unset AM_SYNC_CALLED_BY +epprd_rg:rg_move_complete[462] [[ TRUE == TRUE ]] +epprd_rg:rg_move_complete[491] [[ -z '' ]] +epprd_rg:rg_move_complete[493] RESOURCE_GROUPS=epprd_rg +epprd_rg:rg_move_complete[499] GROUPNAME=epprd_rg +epprd_rg:rg_move_complete[499] export GROUPNAME +epprd_rg:rg_move_complete[501] cl_rrmethods2call postrg_move +epprd_rg:cl_rrmethods2call[56] version=%I% +epprd_rg:cl_rrmethods2call[84] RRMETHODS='' +epprd_rg:cl_rrmethods2call[85] NEED_RR_ENV_VARS=no +epprd_rg:cl_rrmethods2call[124] NEED_RR_ENV_VARS=yes +epprd_rg:cl_rrmethods2call[129] : Set the '*_REP_RESOURCE' variables if needed. 
+epprd_rg:cl_rrmethods2call[131] [[ yes == yes ]] +epprd_rg:cl_rrmethods2call[133] cllsres +epprd_rg:cl_rrmethods2call[133] 2> /dev/null +epprd_rg:cl_rrmethods2call[133] eval APPLICATIONS='"epprd_app"' EXPORT_FILESYSTEM='"/board_org' '/sapmnt/EPP"' FILESYSTEM='""' FORCED_VARYON='"false"' FSCHECK_TOOL='"fsck"' FS_BEFORE_IPADDR='"false"' MOUNT_FILESYSTEM='"/board;/board_org"' RECOVERY_METHOD='"sequential"' SERVICE_LABEL='"epprd"' SSA_DISK_FENCING='"false"' VG_AUTO_IMPORT='"false"' VOLUME_GROUP='"datavg"' USERDEFINED_RESOURCES='""' +epprd_rg:cl_rrmethods2call[1] APPLICATIONS=epprd_app +epprd_rg:cl_rrmethods2call[1] EXPORT_FILESYSTEM='/board_org /sapmnt/EPP' +epprd_rg:cl_rrmethods2call[1] FILESYSTEM='' +epprd_rg:cl_rrmethods2call[1] FORCED_VARYON=false +epprd_rg:cl_rrmethods2call[1] FSCHECK_TOOL=fsck +epprd_rg:cl_rrmethods2call[1] FS_BEFORE_IPADDR=false +epprd_rg:cl_rrmethods2call[1] MOUNT_FILESYSTEM='/board;/board_org' +epprd_rg:cl_rrmethods2call[1] RECOVERY_METHOD=sequential +epprd_rg:cl_rrmethods2call[1] SERVICE_LABEL=epprd +epprd_rg:cl_rrmethods2call[1] SSA_DISK_FENCING=false +epprd_rg:cl_rrmethods2call[1] VG_AUTO_IMPORT=false +epprd_rg:cl_rrmethods2call[1] VOLUME_GROUP=datavg +epprd_rg:cl_rrmethods2call[1] USERDEFINED_RESOURCES='' +epprd_rg:cl_rrmethods2call[137] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[142] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[147] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[152] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[157] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[162] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[167] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[172] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[182] [[ -z '' ]] +epprd_rg:cl_rrmethods2call[184] typeset sysmgdata +epprd_rg:cl_rrmethods2call[185] typeset reposmgdata +epprd_rg:cl_rrmethods2call[186] [[ -x /usr/es/sbin/cluster/xd_generic/xd_cli/clxd_list_mg_smit ]] +epprd_rg:cl_rrmethods2call[191] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[191] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[197] echo '' +epprd_rg:cl_rrmethods2call[199] return 0 +epprd_rg:rg_move_complete[501] METHODS='' +epprd_rg:rg_move_complete[516] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. 
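
cl_rrmethods2call, traced above, pulls the group's resource definitions by eval'ing cllsres output, which is itself a stream of quoted shell assignments. Sketch:

    # cllsres prints lines like: APPLICATIONS="epprd_app" VOLUME_GROUP="datavg" ...
    eval "$(cllsres 2> /dev/null)"
    print -- "$APPLICATIONS on $VOLUME_GROUP, service label $SERVICE_LABEL"
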
+epprd_rg:rg_move_complete[518] exit 0 Jan 28 2023 18:07:58 EVENT COMPLETED: rg_move_complete epprds 1 0 |2023-01-28T18:07:58|8590|EVENT COMPLETED: rg_move_complete epprds 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:07:58.093862 + echo '|2023-01-28T18:07:58.093862|INFO: rg_move_complete|epprd_rg|epprds|1|0' + 1>> /var/hacmp/availability/clavailability.log PowerHA SystemMirror Event Summary ---------------------------------------------------------------------------- Serial number for this event: 8590 Event: TE_RG_MOVE_ACQUIRE Start time: Sat Jan 28 18:07:52 2023 End time: Sat Jan 28 18:07:58 2023 Action: Resource: Script Name: ---------------------------------------------------------------------------- No resources changed as a result of this event ---------------------------------------------------------------------------- |EVENT_SUMMARY_START|TE_RG_MOVE_ACQUIRE|2023-01-28T18:07:52|2023-01-28T18:07:58|8590| |EVENT_NO_ACTION| |EVENT_SUMMARY_END| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 8590 No resource state change initiated by the cluster manager as a result of this event ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_JOIN_NODE_DEP_COMPLETE|2023-01-28T18:08:00|8590| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Jan 28 2023 18:08:00 EVENT START: node_up_complete epprds |2023-01-28T18:08:00|8590|EVENT START: node_up_complete epprds| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:08:00.295539 + echo '|2023-01-28T18:08:00.295539|INFO: node_up_complete|epprds' + 1>> /var/hacmp/availability/clavailability.log + version=%I% + set -a + cllsparam -n epprda + eval NODE_NAME=epprda VERBOSE_LOGGING=high PS4=$'\'${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO]' $'\'' DEBUG_LEVEL=Standard LC_ALL=$'\'C\'' + NODE_NAME=epprda + VERBOSE_LOGGING=high :node_up_complete[1] PS4='${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] ' :node_up_complete[1] DEBUG_LEVEL=Standard :node_up_complete[1] LC_ALL=C :node_up_complete[80] set +a :node_up_complete[82] NODENAME=epprds :node_up_complete[83] RC=0 :node_up_complete[83] typeset -i RC :node_up_complete[84] UPDATESTATD=0 :node_up_complete[84] typeset -i UPDATESTATD :node_up_complete[86] LPM_IN_PROGRESS_DIR=/var/hacmp/.lpm_in_progress :node_up_complete[86] typeset LPM_IN_PROGRESS_DIR :node_up_complete[87] LPM_IN_PROGRESS_PREFIX=lpm :node_up_complete[87] typeset LPM_IN_PROGRESS_PREFIX :node_up_complete[88] STATE_FILE=/var/hacmp/cl_dr.state :node_up_complete[88] typeset STATE_FILE :node_up_complete[97] STATUS=0 :node_up_complete[99] set -u :node_up_complete[101] (( 1 < 1 )) :node_up_complete[107] START_MODE='' :node_up_complete[107] typeset START_MODE :node_up_complete[108] (( 1 > 1 )) :node_up_complete[114] : serial number for this event is 8590 :node_up_complete[118] RPCLOCKDSTOPPED=0 :node_up_complete[118] typeset -i RPCLOCKDSTOPPED :node_up_complete[119] [[ -f /tmp/.RPCLOCKDSTOPPED ]] :node_up_complete[127] clnodename :node_up_complete[127] wc -l :node_up_complete[127] (( 2 == 2 )) :node_up_complete[129] clodmget -f group -n HACMPgroup :node_up_complete[129] RESOURCE_GROUPS=epprd_rg :node_up_complete[132] 
clodmget -q group='epprd_rg AND name=EXPORT_FILESYSTEM' -f value -n HACMPresource :node_up_complete[132] EXPORTLIST=$'/board_org\n/sapmnt/EPP' :node_up_complete[133] [[ -n $'/board_org\n/sapmnt/EPP' ]] :node_up_complete[135] UPDATESTATD=1 :node_up_complete[136] [[ epprds == epprda ]] :node_up_complete[146] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+189] TWIN_NAME=epprds :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+196] rpcinfo -p :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw status :cl_update_statd(0)[+207] : Get the current twin, if there is one :cl_update_statd(0)[+209] :cl_update_statd(0)[+209] nfso -H sm_gethost :cl_update_statd(0)[+209] 2>& 1 CURTWIN=epprds :cl_update_statd(0)[+210] RC=0 :cl_update_statd(0)[+212] [[ -z true ]] :cl_update_statd(0)[+212] [[ -z epprds ]] :cl_update_statd(0)[+225] : Get the interface to the twin node :cl_update_statd(0)[+227] :cl_update_statd(0)[+227] get_node_ip epprds :cl_update_statd(0)[+9] (( 1 != 1 )) :cl_update_statd(0)[+15] Twin_Name=epprds :cl_update_statd(0)[+16] NewTwin= :cl_update_statd(0)[+19] : Get the Interface details for every interface on the twin node :cl_update_statd(0)[+20] : Reject interfaces on nodes that are not public boot addresses :cl_update_statd(0)[+21] : because those are the only ones we have state information for :cl_update_statd(0)[+23] :cl_update_statd(0)[+23] cllsif -J ~ -Sw -i epprda :cl_update_statd(0)[+23] LC_ALL=C LOCAL_NETWORK_INFO=epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+24] cllsif -J ~ -Sw -i epprds :cl_update_statd(0)[+24] LC_ALL=C :cl_update_statd(0)[+25] read adapt type network net_type attrib node ip_addr skip interface skip netmask skip skip prefix ip_family :cl_update_statd(0)[+25] IFS=~ :cl_update_statd(0)[+25] [[ public != public ]] :cl_update_statd(0)[+25] [[ boot != boot ]] :cl_update_statd(0)[+33] : Find the state of this candidate :cl_update_statd(0)[+33] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+37] :cl_update_statd(0)[+37] print 61.81.244.123 :cl_update_statd(0)[+37] tr ./ xx addr=i61x81x244x123_epprds :cl_update_statd(0)[+43] eval candidate_state=${i61x81x244x123_epprds:-down} :cl_update_statd(0)[+43] candidate_state=UP :cl_update_statd(0)[+46] : If state is UP, check to see if this node can talk to it :cl_update_statd(0)[+46] [[ UP == UP ]] :cl_update_statd(0)[+50] ping -w 5 -c 1 -q 61.81.244.123 :cl_update_statd(0)[+50] 1> /dev/null :cl_update_statd(0)[+61] echo epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+61] tr \n :cl_update_statd(0)[+62] read lcl_adapt lcl_type lcl_network lcl_net_type lcl_attrib lcl_node lcl_ip_addr skip lcl_interface skip lcl_netmask skip skip lcl_prefix lcl_ip_family :cl_update_statd(0)[+62] IFS=~ :cl_update_statd(0)[+62] [[ 
net_ether_01 != net_ether_01 ]] :cl_update_statd(0)[+62] [[ boot != boot ]] :cl_update_statd(0)[+62] [[ public != public ]] :cl_update_statd(0)[+62] [[ AF_INET != AF_INET ]] :cl_update_statd(0)[+62] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+71] :cl_update_statd(0)[+71] print 61.81.244.134 :cl_update_statd(0)[+71] tr ./ xx addr=i61x81x244x134_epprda :cl_update_statd(0)[+77] eval lcl_candidate_state=${i61x81x244x134_epprda:-down} :cl_update_statd(0)[+77] lcl_candidate_state=UP :cl_update_statd(0)[+77] [[ UP == UP ]] :cl_update_statd(0)[+81] : epprds is on the same network as an interface that is up :cl_update_statd(0)[+82] : on the local node, and the attributes match. :cl_update_statd(0)[+84] NewTwin=epprds :cl_update_statd(0)[+85] break :cl_update_statd(0)[+85] [[ -n epprds ]] :cl_update_statd(0)[+91] break :cl_update_statd(0)[+91] [[ -z epprds ]] :cl_update_statd(0)[+100] echo epprds :cl_update_statd(0)[+101] return 0 NEWTWIN=epprds :cl_update_statd(0)[+227] [[ -z epprds ]] :cl_update_statd(0)[+227] [[ epprds != epprds ]] :cl_update_statd(0)[+259] : RC is actually 0 :cl_update_statd(0)[+266] return 0 :node_up_complete[147] (( 0 )) :node_up_complete[151] break :node_up_complete[156] (( 1 )) :node_up_complete[158] (( 0 )) :node_up_complete[198] [[ TRUE == FALSE ]] :node_up_complete[268] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. :node_up_complete[270] : This is the final clRGinfo output :node_up_complete[272] clRGinfo -p -t :node_up_complete[272] 2>& 1 clRGinfo[431]: version I clRGinfo[517]: Number of resource groups = 0 clRGinfo[562]: cluster epprda_cluster is version = 22 clRGinfo[597]: no resource groups specified on command line - print all clRGinfo[685]: Current group is 'epprd_rg' get primary state info for state 6 get secondary state info for state 6 getPrimaryStateStr: using primary_table => primary_state_table get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_table Cluster Name: epprda_cluster Resource Group Name: epprd_rg Node Group State Delayed Timers ---------------------------------------------------------------- --------------- ------------------- epprda ONLINE epprds OFFLINE :node_up_complete[277] (( 0 == 0 )) :node_up_complete[279] [[ epprds != epprda ]] :node_up_complete[281] grep -w In_progress_file /var/hacmp/cl_dr.state :node_up_complete[281] 2> /dev/null :node_up_complete[281] cut -d= -f2 :node_up_complete[281] lpm_in_progress_file='' :node_up_complete[282] ls '/var/hacmp/.lpm_in_progress/lpm_*' :node_up_complete[282] 2> /dev/null :node_up_complete[282] lpm_in_progress_prefix='' :node_up_complete[283] [[ -n '' ]] :node_up_complete[300] exit 0 Jan 28 2023 18:08:00 EVENT COMPLETED: node_up_complete epprds 0 |2023-01-28T18:08:00|8590|EVENT COMPLETED: node_up_complete epprds 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T18:08:00.533011 + echo '|2023-01-28T18:08:00.533011|INFO: node_up_complete|epprds|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 19:41:10 EVENT START: admin_op user_rg_move 8591 0 |2023-01-28T19:41:10|8591|EVENT START: admin_op user_rg_move 8591 0| :admin_op[110] trap sigint_handler INT :admin_op[116] OP_TYPE=user_rg_move :admin_op[116] typeset OP_TYPE :admin_op[117] SERIAL=8591 :admin_op[117] typeset -li SERIAL :admin_op[118] INVALID=0 :admin_op[118] typeset -li INVALID The administrator initiated the following action at Sat Jan 28 19:41:10 KORST 2023 Check smit.log and 
clutils.log for additional details. Move a Resource Group to Another Node / SiteAttempting to move resource group epprd_rg to node epprds. Jan 28 2023 19:41:10 EVENT COMPLETED: admin_op user_rg_move 8591 0 0 |2023-01-28T19:41:10|8591|EVENT COMPLETED: admin_op user_rg_move 8591 0 0| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 8591 Enqueued rg_move release event for resource group epprd_rg. Enqueued rg_move acquire event for resource group epprd_rg. Cluster External Resource State Change Complete Event has been enqueued. ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_EXTERNAL_RESOURCE_STATE_CHANGE|2023-01-28T19:41:10|8591| |CLUSTER_RG_MOVE_RELEASE|epprd_rg| |CLUSTER_RG_MOVE_ACQUIRE|epprd_rg| |EXTERNAL_RESOURCE_STATE_CHANGE_COMPLETE| |EVENT_PREAMBLE_END| Jan 28 2023 19:41:10 EVENT START: external_resource_state_change epprds |2023-01-28T19:41:10|8591|EVENT START: external_resource_state_change epprds| :external_resource_state_change[62] version=%I% :external_resource_state_change[65] set -u :external_resource_state_change[67] (( 1 != 1 )) :external_resource_state_change[74] : serial number for this event is 8591 :external_resource_state_change[78] exit 0 Jan 28 2023 19:41:10 EVENT COMPLETED: external_resource_state_change epprds 0 |2023-01-28T19:41:10|8591|EVENT COMPLETED: external_resource_state_change epprds 0| Jan 28 2023 19:41:11 EVENT START: rg_move_release epprda 1 |2023-01-28T19:41:11|8592|EVENT START: rg_move_release epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T19:41:11.272143 + echo '|2023-01-28T19:41:11.272143|INFO: rg_move_release|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_release[+54] [[ high = high ]] :rg_move_release[+54] version=1.6 :rg_move_release[+56] set -u :rg_move_release[+58] [ 2 != 2 ] :rg_move_release[+64] set +u :rg_move_release[+66] clcallev rg_move epprda 1 RELEASE Jan 28 2023 19:41:11 EVENT START: rg_move epprda 1 RELEASE |2023-01-28T19:41:11|8592|EVENT START: rg_move epprda 1 RELEASE| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T19:41:11.401993 :clevlog[amlog_trace:320] echo '|2023-01-28T19:41:11.401993|INFO: rg_move|epprd_rg|epprda|1|RELEASE' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. :get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move[76] version=%I% :rg_move[86] STATUS=0 :rg_move[88] [[ ! 
-n '' ]] :rg_move[90] EMULATE=REAL :rg_move[96] set -u :rg_move[98] NODENAME=epprda :rg_move[98] export NODENAME :rg_move[99] RGID=1 :rg_move[100] (( 3 == 3 )) :rg_move[102] ACTION=RELEASE :rg_move[108] : serial number for this event is 8592 :rg_move[112] RG_UP_POSTEVENT_ON_NODE=epprda :rg_move[112] export RG_UP_POSTEVENT_ON_NODE :rg_move[116] clodmget -qid=1 -f group -n HACMPgroup :rg_move[116] eval RGNAME=epprd_rg :rg_move[1] RGNAME=epprd_rg :rg_move[118] UPDATESTATD=0 :rg_move[119] export UPDATESTATD :rg_move[123] RG_MOVE_EVENT=true :rg_move[123] export RG_MOVE_EVENT :rg_move[128] group_state='$RESGRP_epprd_rg_epprda' :rg_move[129] set +u :rg_move[130] eval print '$RESGRP_epprd_rg_epprda' :rg_move[1] print ONLINE :rg_move[130] RG_MOVE_ONLINE=ONLINE :rg_move[130] export RG_MOVE_ONLINE :rg_move[131] set -u :rg_move[132] RG_MOVE_ONLINE=ONLINE :rg_move[139] rm -f /tmp/.NFSSTOPPED :rg_move[140] rm -f /tmp/.RPCLOCKDSTOPPED :rg_move[147] set -a :rg_move[148] clsetenvgrp epprda rg_move epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 :rg_move[148] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[149] RC=0 :rg_move[150] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[1] FORCEDOWN_GROUPS='' :rg_move[2] RESOURCE_GROUPS='' :rg_move[3] HOMELESS_GROUPS='' :rg_move[4] HOMELESS_FOLLOWER_GROUPS='' :rg_move[5] ERRSTATE_GROUPS='' :rg_move[6] PRINCIPAL_ACTIONS='' :rg_move[7] ASSOCIATE_ACTIONS='' :rg_move[8] AUXILLIARY_ACTIONS='' :rg_move[8] SIBLING_GROUPS='' :rg_move[9] SIBLING_NODES_BY_GROUP='' :rg_move[10] SIBLING_ACQUIRING_GROUPS='' :rg_move[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' :rg_move[12] SIBLING_RELEASING_GROUPS='' :rg_move[13] SIBLING_RELEASING_NODES_BY_GROUP='' :rg_move[151] set +a :rg_move[155] (( 0 != 0 )) :rg_move[155] [[ -z epprd_rg ]] :rg_move[164] [[ -z TRUE ]] :rg_move[241] AM_SYNC_CALLED_BY=RG_MOVE :rg_move[241] export AM_SYNC_CALLED_BY :rg_move[242] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-01-28T19:41:11.532179 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=RELEASE RESOURCE_GROUPS='"epprd_rg"' PRINCIPAL_ACTION='"RELEASE"' AUXILLIARY_ACTION='"NONE"' :process_resources[1] JOB_TYPE=RELEASE 
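
process_resources, entered above, runs the same cycle on every pass: clRGPA prints shell assignments describing the next job, they are eval'ed under set -a so they reach child scripts, and JOB_TYPE picks the branch until NONE ends the loop. A condensed sketch of that dispatch, with only the branches seen in this event:

    while true
    do
        set -a
        eval $(clRGPA)       # e.g. JOB_TYPE=RELEASE RESOURCE_GROUPS="epprd_rg" ...
        set +a
        [[ $JOB_TYPE == NONE ]] && break
        case $JOB_TYPE in
            APPLICATIONS)       process_applications $ACTION ;;
            EXPORT_FILESYSTEMS) unexport_filesystems ;;        # on RELEASE
        esac
    done
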
:process_resources[1] RESOURCE_GROUPS=epprd_rg :process_resources[1] PRINCIPAL_ACTION=RELEASE :process_resources[1] AUXILLIARY_ACTION=NONE :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 )) :process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3363] INFO_STRING='' +epprd_rg:process_resources[3364] clnodename +epprd_rg:process_resources[3373] ENV_VAR=GROUP_epprd_rg_epprda +epprd_rg:process_resources[3374] eval 'echo $GROUP_epprd_rg_epprda' +epprd_rg:process_resources[1] echo ISUPPREEVENT +epprd_rg:process_resources[3374] read ENV_VAR +epprd_rg:process_resources[3375] [[ ISUPPREEVENT == WILLBEUPPOSTEVENT ]] +epprd_rg:process_resources[3379] [[ ISUPPREEVENT == ISUPPREEVENT ]] +epprd_rg:process_resources[3380] INFO_STRING='|SOURCE=epprda' +epprd_rg:process_resources[3381] IS_SERVICE_START=0 +epprd_rg:process_resources[3373] ENV_VAR=GROUP_epprd_rg_epprds +epprd_rg:process_resources[3374] eval 'echo $GROUP_epprd_rg_epprds' +epprd_rg:process_resources[1] echo WILLBEUPPOSTEVENT +epprd_rg:process_resources[3374] read ENV_VAR +epprd_rg:process_resources[3375] [[ WILLBEUPPOSTEVENT == WILLBEUPPOSTEVENT ]] +epprd_rg:process_resources[3376] INFO_STRING='|SOURCE=epprda|DESTINATION=epprds' +epprd_rg:process_resources[3377] IS_SERVICE_STOP=0 +epprd_rg:process_resources[3379] [[ WILLBEUPPOSTEVENT == ISUPPREEVENT ]] +epprd_rg:process_resources[3384] (( 0 == 0 && 0 ==0 )) +epprd_rg:process_resources[3385] eval 'echo $WILLBEUPPOSTEVENT' +epprd_rg:process_resources[1] echo +epprd_rg:process_resources[3385] read ENV_VAR +epprd_rg:process_resources[3386] INFO_STRING='RG_FAILOVER|epprd_rg|SOURCE=epprda|DESTINATION=epprds|8592' +epprd_rg:process_resources[3387] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3388] amlog_trace '' 'RG_FAILOVER|epprd_rg|SOURCE=epprda|DESTINATION=epprds|8592' +epprd_rg:process_resources[amlog_trace:318] clcycle clavailability.log +epprd_rg:process_resources[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:process_resources[amlog_trace:319] cltime +epprd_rg:process_resources[amlog_trace:319] DATE=2023-01-28T19:41:11.576643 +epprd_rg:process_resources[amlog_trace:320] echo '|2023-01-28T19:41:11.576643|INFO: RG_FAILOVER|epprd_rg|SOURCE=epprda|DESTINATION=epprds|8592' +epprd_rg:process_resources[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:process_resources[3660] set_resource_group_state RELEASING +epprd_rg:process_resources[set_resource_group_state:82] PS4_FUNC=set_resource_group_state +epprd_rg:process_resources[set_resource_group_state:82] typeset PS4_FUNC +epprd_rg:process_resources[set_resource_group_state:83] [[ high == high ]] +epprd_rg:process_resources[set_resource_group_state:83] set -x +epprd_rg:process_resources[set_resource_group_state:84] STAT=0 +epprd_rg:process_resources[set_resource_group_state:85] new_status=RELEASING +epprd_rg:process_resources[set_resource_group_state:89] export GROUPNAME +epprd_rg:process_resources[set_resource_group_state:90] [[ RELEASING != DOWN ]] +epprd_rg:process_resources[set_resource_group_state:92] clchdaemons -d clstrmgr_scripts -t resource_locator -n epprda -o epprd_rg -v RELEASING +epprd_rg:process_resources[set_resource_group_state:100] : Resource Manager Updates 
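
The RG_FAILOVER record written to clavailability.log above is assembled from per-node state variables named GROUP_<group>_<node>, exported by the cluster manager: ISUPPREEVENT marks the source of the move, WILLBEUPPOSTEVENT the destination. Sketch of that derivation:

    INFO_STRING=''
    for node in $(clnodename)
    do
        eval state=\$GROUP_${GROUPNAME}_${node}
        [[ $state == ISUPPREEVENT ]]      && INFO_STRING="$INFO_STRING|SOURCE=$node"
        [[ $state == WILLBEUPPOSTEVENT ]] && INFO_STRING="$INFO_STRING|DESTINATION=$node"
    done
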
+epprd_rg:process_resources[set_resource_group_state:111] amlog_trace '' 'acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:318] clcycle clavailability.log +epprd_rg:process_resources[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:process_resources[amlog_trace:319] cltime +epprd_rg:process_resources[amlog_trace:319] DATE=2023-01-28T19:41:11.611150 +epprd_rg:process_resources[amlog_trace:320] echo '|2023-01-28T19:41:11.611150|INFO: acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:process_resources[set_resource_group_state:112] cl_RMupdate releasing epprd_rg process_resources 2023-01-28T19:41:11.635901 2023-01-28T19:41:11.640459 +epprd_rg:process_resources[set_resource_group_state:153] return 0 +epprd_rg:process_resources[3661] RC=0 +epprd_rg:process_resources[3662] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:41:11.652626 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=APPLICATIONS ACTION=RELEASE ALL_APPLICATIONS='"epprd_app"' RESOURCE_GROUPS='"epprd_rg' '"' MISCDATA='""' +epprd_rg:process_resources[1] JOB_TYPE=APPLICATIONS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] ALL_APPLICATIONS=epprd_app +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] MISCDATA='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ APPLICATIONS == RELEASE ]] +epprd_rg:process_resources[3360] [[ APPLICATIONS == ONLINE ]] +epprd_rg:process_resources[3549] process_applications RELEASE +epprd_rg:process_resources[process_applications:312] PS4_FUNC=process_applications +epprd_rg:process_resources[process_applications:312] typeset PS4_FUNC +epprd_rg:process_resources[process_applications:313] [[ high == high ]] +epprd_rg:process_resources[process_applications:313] set -x +epprd_rg:process_resources[process_applications:316] : Each subprocess will log to a file with this name and PID +epprd_rg:process_resources[process_applications:318] TMP_FILE=/var/hacmp/log/.process_resources_applications.25493834 +epprd_rg:process_resources[process_applications:318] export TMP_FILE +epprd_rg:process_resources[process_applications:320] rm -f '/var/hacmp/log/.process_resources_applications*' +epprd_rg:process_resources[process_applications:322] WAITPIDS='' +epprd_rg:process_resources[process_applications:323] LPAR_ACQUIRE_FAILED=0 +epprd_rg:process_resources[process_applications:324] LPAR_RELEASE_FAILED=0 +epprd_rg:process_resources[process_applications:325] START_STOP_FAILED=0 +epprd_rg:process_resources[process_applications:326] LIST_OF_APPS=epprd_app +epprd_rg:process_resources[process_applications:329] : Acquire lpar resources in one-shot before starting applications 
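
The application stop that follows is fanned out: each group's start_or_stop_applications_for_rg runs in the background, writes its result to a per-PID status file, and the parent waits on the collected PIDs before reading the results back. A minimal sketch of that pattern (single resource group, as in this event):

    TMP_FILE=/var/hacmp/log/.process_resources_applications.$$
    WAITPIDS=''
    for GROUPNAME in $RESOURCE_GROUPS
    do
        start_or_stop_applications_for_rg RELEASE ${TMP_FILE}.${GROUPNAME} &
        WAITPIDS="$WAITPIDS $!"
    done
    wait $WAITPIDS
    read skip SUCCESS rest < ${TMP_FILE}.${GROUPNAME}   # file holds 'epprd_rg 0' on success
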
+epprd_rg:process_resources[process_applications:331] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[process_applications:343] (( LPAR_ACQUIRE_FAILED == 0 )) +epprd_rg:process_resources[process_applications:345] : Loop through all groups to start or stop applications +epprd_rg:process_resources[process_applications:348] export GROUPNAME +epprd_rg:process_resources[process_applications:351] : Break out application data +epprd_rg:process_resources[process_applications:353] get_list_head epprd_app +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo epprd_app +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo epprd_app +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[process_applications:353] read LIST_OF_APPLICATIONS_FOR_RG +epprd_rg:process_resources[process_applications:354] get_list_tail epprd_app +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo epprd_app +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[process_applications:354] read ALL_APPLICATIONS +epprd_rg:process_resources[process_applications:356] get_list_head +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[process_applications:356] read MISCDATA_FOR_RG +epprd_rg:process_resources[process_applications:357] get_list_tail +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[process_applications:357] read MISCDATA +epprd_rg:process_resources[process_applications:359] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[process_applications:363] TMPLIST='' +epprd_rg:process_resources[process_applications:364] print epprd_app +epprd_rg:process_resources[process_applications:364] set -A appnames epprd_app +epprd_rg:process_resources[process_applications:366] (( cnt=0)) +epprd_rg:process_resources[process_applications:366] (( cnt < 1)) +epprd_rg:process_resources[process_applications:367] 
TMPLIST='epprd_app ' +epprd_rg:process_resources[process_applications:368] LIST_OF_APPLICATIONS_FOR_RG=epprd_app +epprd_rg:process_resources[process_applications:366] ((cnt++ )) +epprd_rg:process_resources[process_applications:366] (( cnt < 1)) +epprd_rg:process_resources[process_applications:371] LIST_OF_APPLICATIONS_FOR_RG='epprd_app ' +epprd_rg:process_resources[process_applications:374] APPLICATIONS='epprd_app ' +epprd_rg:process_resources[process_applications:374] export APPLICATIONS +epprd_rg:process_resources[process_applications:375] MISC_DATA='' +epprd_rg:process_resources[process_applications:375] export MISC_DATA +epprd_rg:process_resources[process_applications:378] : Now call start_or_stop_applications_for_rg to do the app start/stop. +epprd_rg:process_resources[process_applications:381] start_or_stop_applications_for_rg RELEASE /var/hacmp/log/.process_resources_applications.25493834.epprd_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:248] PS4_FUNC=start_or_stop_applications_for_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:248] typeset PS4_FUNC +epprd_rg:process_resources[start_or_stop_applications_for_rg:249] [[ high == high ]] +epprd_rg:process_resources[start_or_stop_applications_for_rg:249] set -x +epprd_rg:process_resources[start_or_stop_applications_for_rg:251] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[start_or_stop_applications_for_rg:255] cmd_to_execute=stop_server +epprd_rg:process_resources[start_or_stop_applications_for_rg:259] : File name to store our exit status +epprd_rg:process_resources[start_or_stop_applications_for_rg:261] STATUS_FILE=/var/hacmp/log/.process_resources_applications.25493834.epprd_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:264] : Use clcallev to run the event +epprd_rg:process_resources[start_or_stop_applications_for_rg:266] clcallev stop_server 'epprd_app ' +epprd_rg:process_resources[process_applications:384] : Add PID of the last bg start_or_stop_applications_for_rg process to WAITPIDS. +epprd_rg:process_resources[process_applications:386] WAITPIDS=' 23331166' +epprd_rg:process_resources[process_applications:390] : Wait for the start_or_stop_applications_for_rg PIDs to finish. +epprd_rg:process_resources[process_applications:393] wait 23331166 Jan 28 2023 19:41:11 EVENT START: stop_server epprd_app |2023-01-28T19:41:11|8592|EVENT START: stop_server epprd_app | +epprd_rg:stop_server[+59] version=%I% +epprd_rg:stop_server[+62] STATUS=0 +epprd_rg:stop_server[+66] [ ! 
-n ] +epprd_rg:stop_server[+68] EMULATE=REAL +epprd_rg:stop_server[+71] PROC_RES=false +epprd_rg:stop_server[+75] [[ APPLICATIONS != 0 ]] +epprd_rg:stop_server[+75] [[ APPLICATIONS != GROUP ]] +epprd_rg:stop_server[+76] PROC_RES=true +epprd_rg:stop_server[+79] typeset WPARNAME WPARDIR EXEC +epprd_rg:stop_server[+80] WPARDIR= +epprd_rg:stop_server[+81] EXEC= +epprd_rg:stop_server[+83] typeset -i rc=0 +epprd_rg:stop_server[+84] +epprd_rg:stop_server[+84] clwparname epprd_rg +epprd_rg:clwparname[38] version=1.3.1.1 +epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clwparname[44] [[ -z '' ]] +epprd_rg:clwparname[44] exit 0 WPARNAME= +epprd_rg:stop_server[+85] rc=0 +epprd_rg:stop_server[+87] set -u +epprd_rg:stop_server[+90] ALLSERVERS=All_servers +epprd_rg:stop_server[+91] [ REAL = EMUL ] +epprd_rg:stop_server[+96] cl_RMupdate resource_releasing All_servers stop_server 2023-01-28T19:41:11.799648 2023-01-28T19:41:11.804083 +epprd_rg:stop_server[+101] (( 0 == 0 )) +epprd_rg:stop_server[+101] [[ -n ]] +epprd_rg:stop_server[+120] +epprd_rg:stop_server[+120] cut -d: -f3 +epprd_rg:stop_server[+120] cllsserv -cn epprd_app STOP=/etc/hacmp/epprd_stop.sh +epprd_rg:stop_server[+121] +epprd_rg:stop_server[+121] cut -d -f1 +epprd_rg:stop_server[+121] echo /etc/hacmp/epprd_stop.sh STOP_SCRIPT=/etc/hacmp/epprd_stop.sh +epprd_rg:stop_server[+123] PATTERN=epprda epprd_app +epprd_rg:stop_server[+123] [[ -n ]] +epprd_rg:stop_server[+123] [[ -z ]] +epprd_rg:stop_server[+123] [[ -x /etc/hacmp/epprd_stop.sh ]] +epprd_rg:stop_server[+133] [ REAL = EMUL ] +epprd_rg:stop_server[+139] amlog_trace Stopping application controller|epprd_app +epprd_rg:stop_server[+55] clcycle clavailability.log +epprd_rg:stop_server[+55] 1> /dev/null 2>& 1 +epprd_rg:stop_server[+55] +epprd_rg:stop_server[+55] cltime DATE=2023-01-28T19:41:11.837187 +epprd_rg:stop_server[+55] echo |2023-01-28T19:41:11.837187|INFO: Stopping application controller|epprd_app +epprd_rg:stop_server[+55] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:stop_server[+140] /etc/hacmp/epprd_stop.sh +epprd_rg:stop_server[+140] ODMDIR=/etc/objrepos #### ##### #### ##### ##### # # # #### # # # # # # # # # ## # # # #### # # # # # # # # # # # # # # # # ##### ##### # # # # # ### # # # # # # # # # ## # # #### # #### # # # # # #### ##### # ###### ####### ###### ###### # # # # # # # # # # # # # # # # # # # # # ##### # # ###### ##### ###### ###### # ####### # # # # # # # # # # # # ##### # # # ####### # # Checking EPP Database ------------------------------------------- J2EE Database is running See logfile /home/eppadm/JdbcCon.log stopping the SAP instance J00 Shutdown-Log is written to /home/eppadm/stopsap_J00.log ------------------------------------------- /usr/sap/EPP/J00/exe/sapcontrol -prot NI_HTTP -nr 00 -function Stop Instance on host epprda stopped Waiting for cleanup of resources ....................... stopping the SAP instance SCS01 Shutdown-Log is written to /home/eppadm/stopsap_SCS01.log ------------------------------------------- /usr/sap/EPP/SCS01/exe/sapcontrol -prot NI_HTTP -nr 01 -function Stop Instance on host epprda stopped Waiting for cleanup of resources .. Running /usr/sap/EPP/SYS/exe/run/stopj2eedb Trying to stop EPP database ... Log file: /home/eppadm/stopdb.log EPP database stopped /usr/sap/EPP/SYS/exe/run/stopj2eedb completed successfully stopping the SAP instance SMDA97 Shutdown-Log is written to /home/daaadm/stopsap_SMDA97.log ------------------------------------------- Instance SMDA97 was not running! 
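
stop_server, traced above, resolves the controller's STOP script from the cluster configuration before running it; the banner and SAP shutdown messages are the stop script's own output. Sketch of the resolution, assuming cllsserv's colon-separated name:start:stop record format shown in the trace:

    STOP=$(cllsserv -cn epprd_app | cut -d: -f3)    # full stop command
    STOP_SCRIPT=$(echo "$STOP" | cut -d' ' -f1)     # first word: the script path
    if [[ -x $STOP_SCRIPT ]]
    then
        ODMDIR=/etc/objrepos "$STOP_SCRIPT"
    fi
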
LSNRCTL for IBM/AIX RISC System/6000: Version 11.2.0.3.0 - Production on 28-JAN-2023 19:42:30 Copyright (c) 1991, 2011, Oracle. All rights reserved. Connecting to (ADDRESS=(PROTOCOL=IPC)(KEY=EPP.WORLD)) The command completed successfully +epprd_rg:sh[+1] kill -9 25166226 +epprd_rg:stop_server[+141] rc=0 +epprd_rg:stop_server[+143] (( rc != 0 )) +epprd_rg:stop_server[+151] amlog_trace Stopping application controller|epprd_app +epprd_rg:stop_server[+55] clcycle clavailability.log +epprd_rg:stop_server[+55] 1> /dev/null 2>& 1 +epprd_rg:stop_server[+55] +epprd_rg:stop_server[+55] cltime DATE=2023-01-28T19:42:31.742116 +epprd_rg:stop_server[+55] echo |2023-01-28T19:42:31.742116|INFO: Stopping application controller|epprd_app +epprd_rg:stop_server[+55] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:stop_server[+174] ALLNOERRSERV=All_nonerror_servers +epprd_rg:stop_server[+175] [ REAL = EMUL ] +epprd_rg:stop_server[+180] cl_RMupdate resource_down All_nonerror_servers stop_server 2023-01-28T19:42:31.764121 2023-01-28T19:42:31.768649 +epprd_rg:stop_server[+183] exit 0 Jan 28 2023 19:42:31 EVENT COMPLETED: stop_server epprd_app 0 |2023-01-28T19:42:31|8592|EVENT COMPLETED: stop_server epprd_app 0| +epprd_rg:process_resources[start_or_stop_applications_for_rg:267] RC=0 +epprd_rg:process_resources[start_or_stop_applications_for_rg:269] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[start_or_stop_applications_for_rg:279] (( 0 != 0 )) +epprd_rg:process_resources[start_or_stop_applications_for_rg:291] : Store the result for later accumulation +epprd_rg:process_resources[start_or_stop_applications_for_rg:293] print 'epprd_rg 0' +epprd_rg:process_resources[start_or_stop_applications_for_rg:293] 1>> /var/hacmp/log/.process_resources_applications.25493834.epprd_rg +epprd_rg:process_resources[process_applications:396] : Look at all the status files to see if any were unsuccessful +epprd_rg:process_resources[process_applications:399] cat /var/hacmp/log/.process_resources_applications.25493834.epprd_rg +epprd_rg:process_resources[process_applications:399] read skip SUCCESS rest +epprd_rg:process_resources[process_applications:401] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:411] rm -f /var/hacmp/log/.process_resources_applications.25493834.epprd_rg +epprd_rg:process_resources[process_applications:416] : Release lpar resources in one-shot now that applications are stopped +epprd_rg:process_resources[process_applications:418] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[process_applications:420] GROUPNAME=epprd_rg +epprd_rg:process_resources[process_applications:420] export GROUPNAME +epprd_rg:process_resources[process_applications:421] clmanageroha -o release -s -l epprd_app +epprd_rg:process_resources[process_applications:421] 3>& 2 +epprd_rg:clmanageroha[318] : version='@(#)' 5881272 43haes/usr/sbin/cluster/events/clmanageroha.sh, 61aha_r726, 2205A_aha726, May 16 2022 12:15 PM +epprd_rg:clmanageroha[321] clodmget -n -f connection_type HACMPhmcparam +epprd_rg:clmanageroha[321] CONN_TYPE=0 +epprd_rg:clmanageroha[321] typeset -i CONN_TYPE +epprd_rg:clmanageroha[323] clodmget -q name='epprda and object like POWERVS_*' -nf name HACMPnode +epprd_rg:clmanageroha[323] 2> /dev/null +epprd_rg:clmanageroha[323] [[ -n '' ]] +epprd_rg:clmanageroha[326] export CONN_TYPE +epprd_rg:clmanageroha[331] roha_session_open -o release -s -l epprd_app +epprd_rg:clmanageroha[roha_session_open:131] roha_session.id=23331172 +epprd_rg:clmanageroha[roha_session_open:132] date 
+epprd_rg:clmanageroha[roha_session_open:132] LC_ALL=C +epprd_rg:clmanageroha[roha_session_open:132] roha_session_log 'Open session 23331172 at Sat Jan 28 19:42:31 KORST 2023' [ROHALOG:23331172:(0.065)] Open session 23331172 at Sat Jan 28 19:42:31 KORST 2023 +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:146] roha_session.operation=release +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:143] roha_session.systemmirror_mode=1 +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:149] roha_session.optimal_apps=epprd_app +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:163] [[ release != @(acquire|release|adjust) ]] +epprd_rg:clmanageroha[roha_session_open:168] no_roha_apps=0 +epprd_rg:clmanageroha[roha_session_open:168] typeset -i no_roha_apps +epprd_rg:clmanageroha[roha_session_open:169] need_explicit_res_rel=0 +epprd_rg:clmanageroha[roha_session_open:169] typeset -i need_explicit_res_rel +epprd_rg:clmanageroha[roha_session_open:187] [[ -n epprd_app ]] +epprd_rg:clmanageroha[roha_session_open:187] clmgr q roha +epprd_rg:clmanageroha[roha_session_open:187] uniq -d +epprd_rg:clmanageroha[roha_session_open:187] sort +epprd_rg:clmanageroha[roha_session_open:187] echo epprd_app +epprd_rg:clmanageroha[roha_session_open:187] sort -u +epprd_rg:clmanageroha[roha_session_open:187] echo '\nepprd_app' +epprd_rg:clmanageroha[roha_session_open:187] [[ -z '' ]] +epprd_rg:clmanageroha[roha_session_open:189] roha_session_log 'INFO: No ROHA configured on applications.\n' [ROHALOG:23331172:(0.505)] INFO: No ROHA configured on applications. 
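
The pipeline traced above appears to intersect the event's application list with the ROHA-capable applications reported by clmgr: lines common to both (uniq -d over the sorted, combined lists) would mark ROHA-managed controllers, and an empty result means there is nothing to manage. A sketch under that reading:

    apps=epprd_app
    roha_apps=$(print -- "$(clmgr q roha)\n$(echo $apps | sort -u)" | sort | uniq -d)
    [[ -z $roha_apps ]] && roha_session_log 'INFO: No ROHA configured on applications.\n'
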
[ROHALOG:23331172:(0.505)] +epprd_rg:clmanageroha[roha_session_open:190] no_roha_apps=1 +epprd_rg:clmanageroha[roha_session_open:195] read_tunables +epprd_rg:clmanageroha[roha_session_open:196] echo '' +epprd_rg:clmanageroha[roha_session_open:196] grep -q epprda +epprd_rg:clmanageroha[roha_session_open:197] (( 1 == 0 )) +epprd_rg:clmanageroha[roha_session_open:202] (( 1 == 1 )) +epprd_rg:clmanageroha[roha_session_open:203] roha_session_read_odm_dynresop DLPAR_MEM +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_MEM -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:203] (( 0 == 0.00 )) +epprd_rg:clmanageroha[roha_session_open:204] roha_session_read_odm_dynresop DLPAR_PROCS +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROCS -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:204] (( 0 == 0 )) +epprd_rg:clmanageroha[roha_session_open:205] roha_session_read_odm_dynresop DLPAR_PROC_UNITS +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROC_UNITS -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:205] (( 0 == 0.00 )) +epprd_rg:clmanageroha[roha_session_open:206] roha_session_log 'INFO: Nothing to be done.\n' [ROHALOG:23331172:(0.564)] INFO: Nothing to be done. 
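
roha_session_read_odm_dynresop, called three times above for DLPAR_MEM, DLPAR_PROCS and DLPAR_PROC_UNITS, is a thin ODM read with a zero default:

    function roha_session_read_odm_dynresop
    {
        typeset out
        out=$(ODMDIR=/etc/es/objrepos clodmget -q key=$1 -nf value HACMPdynresop)
        print -- ${out:-0}      # an empty or missing value reads back as 0
    }

All three values are 0 here, so the release session concludes there is nothing to undo and exits.
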
[ROHALOG:23331172:(0.564)] +epprd_rg:clmanageroha[roha_session_open:207] exit 0 +epprd_rg:process_resources[process_applications:422] RC=0 +epprd_rg:process_resources[process_applications:423] (( 0 != 0 )) +epprd_rg:process_resources[process_applications:433] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:434] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:435] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:439] return 0 +epprd_rg:process_resources[3550] RC=0 +epprd_rg:process_resources[3551] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3553] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:42:32.427444 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=EXPORT_FILESYSTEMS ACTION=RELEASE EXPORT_FILE_SYSTEMS='"/board_org,/sapmnt/EPP"' EXPORT_FILE_SYSTEMS_V4='""' RESOURCE_GROUPS='"epprd_rg' '"' STABLE_STORAGE_PATH='""' IP_LABELS='""' DAEMONS='"NFS' '"' +epprd_rg:process_resources[1] JOB_TYPE=EXPORT_FILESYSTEMS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] EXPORT_FILE_SYSTEMS=/board_org,/sapmnt/EPP +epprd_rg:process_resources[1] EXPORT_FILE_SYSTEMS_V4='' +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] STABLE_STORAGE_PATH='' +epprd_rg:process_resources[1] IP_LABELS='' +epprd_rg:process_resources[1] DAEMONS='NFS ' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ EXPORT_FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources[3360] [[ EXPORT_FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources[3595] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[3599] unexport_filesystems +epprd_rg:process_resources[unexport_filesystems:1576] PS4_FUNC=unexport_filesystems +epprd_rg:process_resources[unexport_filesystems:1576] typeset PS4_FUNC +epprd_rg:process_resources[unexport_filesystems:1577] [[ high == high ]] +epprd_rg:process_resources[unexport_filesystems:1577] set -x +epprd_rg:process_resources[unexport_filesystems:1578] STAT=0 +epprd_rg:process_resources[unexport_filesystems:1579] NFSSTOPPED=0 +epprd_rg:process_resources[unexport_filesystems:1580] RPCSTOPPED=0 +epprd_rg:process_resources[unexport_filesystems:1582] export NFSSTOPPED +epprd_rg:process_resources[unexport_filesystems:1585] : For NFSv4, cl_unexport_fs will use STABLE_STORAGE_PATH, which is set by +epprd_rg:process_resources[unexport_filesystems:1586] : clRGPA and can have colon-separated values for multiple RGs. +epprd_rg:process_resources[unexport_filesystems:1587] : We will save off clRGPA values in stable_storage_path and then extract +epprd_rg:process_resources[unexport_filesystems:1588] : each RG into STABLE_STORAGE_PATH for cl_unexport_fs. 
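
unexport_filesystems then peels its per-group values off clRGPA's packed lists: colons separate resource groups, commas separate values within a group. The two helpers traced below amount to:

    function get_list_head      # first group's values, commas become spaces
    {
        echo "$1" | IFS=: read listhead listtail
        echo "$listhead" | tr ',' ' '
    }
    function get_list_tail      # everything after the first group
    {
        echo "$1" | IFS=: read listhead listtail
        echo "$listtail"
    }
    get_list_head /board_org,/sapmnt/EPP | read LIST_OF_EXPORT_FILE_SYSTEMS_FOR_RG
    # -> '/board_org /sapmnt/EPP'
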
+epprd_rg:process_resources[unexport_filesystems:1590] stable_storage_path='' +epprd_rg:process_resources[unexport_filesystems:1590] typeset stable_storage_path +epprd_rg:process_resources[unexport_filesystems:1594] export GROUPNAME +epprd_rg:process_resources[unexport_filesystems:1596] get_list_head /board_org,/sapmnt/EPP +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[unexport_filesystems:1596] read LIST_OF_EXPORT_FILE_SYSTEMS_FOR_RG +epprd_rg:process_resources[unexport_filesystems:1597] get_list_tail /board_org,/sapmnt/EPP +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[unexport_filesystems:1597] read EXPORT_FILE_SYSTEMS +epprd_rg:process_resources[unexport_filesystems:1599] get_list_head +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[get_list_head:62] echo +epprd_rg:process_resources[unexport_filesystems:1599] read LIST_OF_EXPORT_FILE_SYSTEMS_V4_FOR_RG +epprd_rg:process_resources[unexport_filesystems:1600] get_list_tail +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[unexport_filesystems:1600] read EXPORT_FILE_SYSTEMS_V4 +epprd_rg:process_resources[unexport_filesystems:1601] get_list_head +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo 
+epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[unexport_filesystems:1601] read STABLE_STORAGE_PATH +epprd_rg:process_resources[unexport_filesystems:1602] get_list_tail +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[unexport_filesystems:1602] read stable_storage_path +epprd_rg:process_resources[unexport_filesystems:1604] cl_unexport_fs '/board_org /sapmnt/EPP' '' +epprd_rg:cl_unexport_fs[136] version=%I% +epprd_rg:cl_unexport_fs[139] . /usr/es/sbin/cluster/events/utils/cl_nfs_utils +epprd_rg:cl_unexport_fs[98] PROGNAME=cl_unexport_fs +epprd_rg:cl_unexport_fs[99] [[ high == high ]] +epprd_rg:cl_unexport_fs[101] set -x +epprd_rg:cl_unexport_fs[102] version=%I +epprd_rg:cl_unexport_fs[105] cl_exports_data='' +epprd_rg:cl_unexport_fs[105] typeset cl_exports_data +epprd_rg:cl_unexport_fs[106] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_unexport_fs[141] UNEXPORT_V3='/board_org /sapmnt/EPP' +epprd_rg:cl_unexport_fs[142] UNEXPORT_V4='' +epprd_rg:cl_unexport_fs[144] STATUS=0 +epprd_rg:cl_unexport_fs[146] PROC_RES=false +epprd_rg:cl_unexport_fs[150] [[ EXPORT_FILESYSTEMS != 0 ]] +epprd_rg:cl_unexport_fs[150] [[ EXPORT_FILESYSTEMS != GROUP ]] +epprd_rg:cl_unexport_fs[151] PROC_RES=true +epprd_rg:cl_unexport_fs[154] set -u +epprd_rg:cl_unexport_fs[156] (( 2 != 2 )) +epprd_rg:cl_unexport_fs[162] [[ __AIX__ == __AIX__ ]] +epprd_rg:cl_unexport_fs[164] oslevel -r +epprd_rg:cl_unexport_fs[164] cut -c1-2 +epprd_rg:cl_unexport_fs[164] (( 72 > 52 )) +epprd_rg:cl_unexport_fs[166] FORCE=-F +epprd_rg:cl_unexport_fs[180] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_unexport_fs[181] DARE_EVENT=reconfig_resource_release +epprd_rg:cl_unexport_fs[184] unexport_v4='' +epprd_rg:cl_unexport_fs[185] [[ -z '' ]] +epprd_rg:cl_unexport_fs[185] [[ rg_move == reconfig_resource_release ]] +epprd_rg:cl_unexport_fs[196] [[ -z '' ]] +epprd_rg:cl_unexport_fs[196] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_unexport_fs[198] unexport_v3='' +epprd_rg:cl_unexport_fs[204] getline_exports /board_org +epprd_rg:cl_unexport_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_unexport_fs[getline_exports:45] line='' +epprd_rg:cl_unexport_fs[getline_exports:45] typeset line +epprd_rg:cl_unexport_fs[getline_exports:46] flag=0 +epprd_rg:cl_unexport_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_unexport_fs[getline_exports:47] fs=/board_org +epprd_rg:cl_unexport_fs[getline_exports:49] [[ -z /board_org ]] +epprd_rg:cl_unexport_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_unexport_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_unexport_fs[getline_exports:56] read -r line +epprd_rg:cl_unexport_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] 
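The get_list_head and get_list_tail calls above unpack clRGPA's packed lists: values for different resource groups are joined with colons, and the items inside one group's value with commas. A sketch of both helpers as the trace exercises them (in ksh the read at the end of a pipeline runs in the current shell, which is why the pattern works):

    # Sketch: items of the first group, commas turned into spaces.
    get_list_head()
    {
        typeset listhead listtail
        echo $* | IFS=: read listhead listtail
        echo $listhead | tr ',' ' '
    }

    # Sketch: everything after the first colon, i.e. the remaining groups.
    get_list_tail()
    {
        typeset listhead listtail
        echo $* | IFS=: read listhead listtail
        echo $listtail
    }

unexport_filesystems consumes them exactly as traced: get_list_head $EXPORT_FILE_SYSTEMS | read LIST_OF_EXPORT_FILE_SYSTEMS_FOR_RG, then get_list_tail to shift to the next group.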
+epprd_rg:cl_unexport_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_unexport_fs[getline_exports:69] (( 1 == 0 )) +epprd_rg:cl_unexport_fs[getline_exports:74] [[ 0 == 1 ]] +epprd_rg:cl_unexport_fs[getline_exports:56] read -r line +epprd_rg:cl_unexport_fs[getline_exports:59] line='/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_unexport_fs[getline_exports:60] line='/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_unexport_fs[getline_exports:63] [[ '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' == #* ]] +epprd_rg:cl_unexport_fs[getline_exports:68] echo '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_unexport_fs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_unexport_fs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_unexport_fs[getline_exports:71] flag=1 +epprd_rg:cl_unexport_fs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_unexport_fs[getline_exports:76] echo '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_unexport_fs[getline_exports:76] grep -w '\\' +epprd_rg:cl_unexport_fs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_unexport_fs[getline_exports:81] cl_exports_data=' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_unexport_fs[getline_exports:82] break +epprd_rg:cl_unexport_fs[getline_exports:89] return 0 +epprd_rg:cl_unexport_fs[205] export_line=' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_unexport_fs[210] echo /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds +epprd_rg:cl_unexport_fs[210] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_unexport_fs[211] cut -d- -f2- +epprd_rg:cl_unexport_fs[211] tr , ' ' +epprd_rg:cl_unexport_fs[210] options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap:epprd:epprda:epprds root=epprdap:epprd:epprda:epprds ' +epprd_rg:cl_unexport_fs[217] vers_missing=1 +epprd_rg:cl_unexport_fs[240] (( vers_missing )) +epprd_rg:cl_unexport_fs[240] unexport_v3=' /board_org' +epprd_rg:cl_unexport_fs[204] getline_exports /sapmnt/EPP +epprd_rg:cl_unexport_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_unexport_fs[getline_exports:45] line='' +epprd_rg:cl_unexport_fs[getline_exports:45] typeset line +epprd_rg:cl_unexport_fs[getline_exports:46] flag=0 +epprd_rg:cl_unexport_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_unexport_fs[getline_exports:47] fs=/sapmnt/EPP +epprd_rg:cl_unexport_fs[getline_exports:49] [[ -z /sapmnt/EPP ]] +epprd_rg:cl_unexport_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_unexport_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_unexport_fs[getline_exports:56] read -r line +epprd_rg:cl_unexport_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:60] line='/sapmnt/EPP 
-sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_unexport_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:68] grep -q '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_unexport_fs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_unexport_fs[getline_exports:71] flag=1 +epprd_rg:cl_unexport_fs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_unexport_fs[getline_exports:76] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:76] grep -w '\\' +epprd_rg:cl_unexport_fs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_unexport_fs[getline_exports:81] cl_exports_data=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:82] break +epprd_rg:cl_unexport_fs[getline_exports:89] return 0 +epprd_rg:cl_unexport_fs[205] export_line=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[210] echo /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_unexport_fs[210] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_unexport_fs[211] cut -d- -f2- +epprd_rg:cl_unexport_fs[211] tr , ' ' +epprd_rg:cl_unexport_fs[210] options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap root=epprdap ' +epprd_rg:cl_unexport_fs[217] vers_missing=1 +epprd_rg:cl_unexport_fs[240] (( vers_missing )) +epprd_rg:cl_unexport_fs[240] unexport_v3=' /board_org /sapmnt/EPP' +epprd_rg:cl_unexport_fs[243] UNEXPORT_V3=' /board_org /sapmnt/EPP' +epprd_rg:cl_unexport_fs[244] UNEXPORT_V4='' +epprd_rg:cl_unexport_fs[247] hasrv='' +epprd_rg:cl_unexport_fs[249] [[ -z '' ]] +epprd_rg:cl_unexport_fs[251] query=name='STABLE_STORAGE_PATH AND group=epprd_rg' +epprd_rg:cl_unexport_fs[252] odmget -q name='STABLE_STORAGE_PATH AND group=epprd_rg' HACMPresource +epprd_rg:cl_unexport_fs[252] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_unexport_fs[252] STABLE_STORAGE_PATH='' +epprd_rg:cl_unexport_fs[256] [[ -z '' ]] +epprd_rg:cl_unexport_fs[258] STABLE_STORAGE_PATH=/var/adm/nfsv4.hacmp/epprd_rg +epprd_rg:cl_unexport_fs[261] [[ -z '' ]] +epprd_rg:cl_unexport_fs[263] query=name='SERVICE_LABEL AND group=epprd_rg' +epprd_rg:cl_unexport_fs[264] odmget -q name='SERVICE_LABEL AND group=epprd_rg' HACMPresource +epprd_rg:cl_unexport_fs[264] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_unexport_fs[264] SERVICE_LABEL=epprd +epprd_rg:cl_unexport_fs[268] ps -eo args +epprd_rg:cl_unexport_fs[268] grep -w nfsd +epprd_rg:cl_unexport_fs[268] grep -qw -- '-gp on' +epprd_rg:cl_unexport_fs[272] gp=off +epprd_rg:cl_unexport_fs[275] /usr/sbin/bootinfo -K +epprd_rg:cl_unexport_fs[275] KERNEL_BITS=64 +epprd_rg:cl_unexport_fs[277] [[ off == on ]] +epprd_rg:cl_unexport_fs[282] NFSv4_REGISTERED=0 +epprd_rg:cl_unexport_fs[286] V3=:2:3 +epprd_rg:cl_unexport_fs[287] V4=:4 +epprd_rg:cl_unexport_fs[289] [[ rg_move != reconfig_resource_release ]] +epprd_rg:cl_unexport_fs[290] [[ rg_move != release_vg_fs ]] +epprd_rg:cl_unexport_fs[298] [[ -n '' ]] +epprd_rg:cl_unexport_fs[321] V3='' +epprd_rg:cl_unexport_fs[322] V4='' +epprd_rg:cl_unexport_fs[326] ALLEXPORTS=All_exports +epprd_rg:cl_unexport_fs[328] cl_RMupdate resource_releasing All_exports cl_unexport_fs 2023-01-28T19:42:32.702537 2023-01-28T19:42:32.706872 
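getline_exports, traced twice above, looks one file system up in /usr/es/sbin/cluster/etc/exports. The grep anchor accepts the path only as the first whitespace-delimited field, so /board_org matches its own entry and nothing else. A condensed sketch (the traced version also folds backslash continuation lines into cl_exports_data, omitted here):

    EXPFILE=/usr/es/sbin/cluster/etc/exports
    getline_exports()
    {
        typeset fs=$1 line
        [[ -z $fs || ! -r $EXPFILE ]] && return 1
        cat $EXPFILE | while read -r line ; do
            [[ $line == #* ]] && continue        # skip comment lines
            if echo "$line" | grep -q "^[[:space:]]*${fs}[[:space:]]" ; then
                print -- "$line"                 # the matching exports entry
                return 0
            fi
        done
        return 1
    }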
+epprd_rg:cl_unexport_fs[330] echo /board_org /sapmnt/EPP +epprd_rg:cl_unexport_fs[330] tr ' ' '\n' +epprd_rg:cl_unexport_fs[330] sort +epprd_rg:cl_unexport_fs[330] FILESYSTEM_LIST=$'/board_org\n/sapmnt/EPP' +epprd_rg:cl_unexport_fs[334] v3='' +epprd_rg:cl_unexport_fs[335] v4='' +epprd_rg:cl_unexport_fs[336] root='' +epprd_rg:cl_unexport_fs[337] old_options='' +epprd_rg:cl_unexport_fs[338] new_options='' +epprd_rg:cl_unexport_fs[340] exportfs +epprd_rg:cl_unexport_fs[340] grep '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_unexport_fs[340] export_line='/board_org -root=epprd:epprda:epprds' +epprd_rg:cl_unexport_fs[342] [[ -z '/board_org -root=epprd:epprda:epprds' ]] +epprd_rg:cl_unexport_fs[344] echo /board_org -root=epprd:epprda:epprds +epprd_rg:cl_unexport_fs[344] cut '-d ' -f2- +epprd_rg:cl_unexport_fs[344] cut -d- -f2- +epprd_rg:cl_unexport_fs[344] tr , ' ' +epprd_rg:cl_unexport_fs[344] old_options=root=epprd:epprda:epprds +epprd_rg:cl_unexport_fs[365] new_options=,root=epprd:epprda:epprds +epprd_rg:cl_unexport_fs[371] [[ -z '' ]] +epprd_rg:cl_unexport_fs[371] v3='' +epprd_rg:cl_unexport_fs[377] NFS_VER3='' +epprd_rg:cl_unexport_fs[380] [[ /board_org == /board_org ]] +epprd_rg:cl_unexport_fs[380] v3='' +epprd_rg:cl_unexport_fs[380] NFS_VER3=3 +epprd_rg:cl_unexport_fs[380] break +epprd_rg:cl_unexport_fs[382] NFS_VER4='' +epprd_rg:cl_unexport_fs[387] [[ '' == 4 ]] +epprd_rg:cl_unexport_fs[400] echo '' +epprd_rg:cl_unexport_fs[400] cut -d: -f2- +epprd_rg:cl_unexport_fs[400] vers='' +epprd_rg:cl_unexport_fs[402] [[ -z '' ]] +epprd_rg:cl_unexport_fs[404] [[ '' == 4 ]] +epprd_rg:cl_unexport_fs[408] exportfs -i -u -F /board_org exportfs: unexported /board_org +epprd_rg:cl_unexport_fs[410] (( 0 != 0 )) +epprd_rg:cl_unexport_fs[417] continue +epprd_rg:cl_unexport_fs[334] v3='' +epprd_rg:cl_unexport_fs[335] v4='' +epprd_rg:cl_unexport_fs[336] root='' +epprd_rg:cl_unexport_fs[337] old_options='' +epprd_rg:cl_unexport_fs[338] new_options='' +epprd_rg:cl_unexport_fs[340] exportfs +epprd_rg:cl_unexport_fs[340] grep '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_unexport_fs[340] export_line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[342] [[ -z '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' ]] +epprd_rg:cl_unexport_fs[344] echo /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_unexport_fs[344] cut '-d ' -f2- +epprd_rg:cl_unexport_fs[344] cut -d- -f2- +epprd_rg:cl_unexport_fs[344] tr , ' ' +epprd_rg:cl_unexport_fs[344] old_options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap root=epprdap' +epprd_rg:cl_unexport_fs[365] new_options=,sec=sys:krb5p:krb5i:krb5:dh +epprd_rg:cl_unexport_fs[365] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw +epprd_rg:cl_unexport_fs[365] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap +epprd_rg:cl_unexport_fs[365] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_unexport_fs[371] [[ -z '' ]] +epprd_rg:cl_unexport_fs[371] v3='' +epprd_rg:cl_unexport_fs[377] NFS_VER3='' +epprd_rg:cl_unexport_fs[380] [[ /sapmnt/EPP == /board_org ]] +epprd_rg:cl_unexport_fs[380] [[ /sapmnt/EPP == /sapmnt/EPP ]] +epprd_rg:cl_unexport_fs[380] v3='' +epprd_rg:cl_unexport_fs[380] NFS_VER3=3 +epprd_rg:cl_unexport_fs[380] break +epprd_rg:cl_unexport_fs[382] NFS_VER4='' +epprd_rg:cl_unexport_fs[387] [[ '' == 4 ]] +epprd_rg:cl_unexport_fs[400] echo '' +epprd_rg:cl_unexport_fs[400] cut -d: -f2- +epprd_rg:cl_unexport_fs[400] vers='' 
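With no vers= option found in either entry, both file systems are treated as NFSv3-only and taken out of service, as just happened for /board_org. The exportfs flags mirror the trace: -u unexports, -i ignores the options file, and -F forces the unexport, chosen because the oslevel check at the top of cl_unexport_fs found a level above 5.2. A sketch of the step, looping over the collected V3 list rather than the traced sorted FILESYSTEM_LIST:

    # Sketch: force-unexport each NFSv3 path collected in UNEXPORT_V3.
    for fs in $UNEXPORT_V3 ; do
        exportfs -i -u -F $fs || print -u2 "cl_unexport_fs: unexport of $fs failed"
    done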
+epprd_rg:cl_unexport_fs[402] [[ -z '' ]] +epprd_rg:cl_unexport_fs[404] [[ '' == 4 ]] +epprd_rg:cl_unexport_fs[408] exportfs -i -u -F /sapmnt/EPP exportfs: unexported /sapmnt/EPP +epprd_rg:cl_unexport_fs[410] (( 0 != 0 )) +epprd_rg:cl_unexport_fs[417] continue +epprd_rg:cl_unexport_fs[452] [[ -n '' ]] +epprd_rg:cl_unexport_fs[480] ALLNOERREXPORT=All_nonerror_exports +epprd_rg:cl_unexport_fs[482] cl_RMupdate resource_down All_nonerror_exports cl_unexport_fs 2023-01-28T19:42:32.792702 2023-01-28T19:42:32.796993 +epprd_rg:cl_unexport_fs[484] exit 0 +epprd_rg:process_resources[unexport_filesystems:1608] return 0 +epprd_rg:process_resources[3600] RC=0 +epprd_rg:process_resources[3601] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3603] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:42:32.810500 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=FILESYSTEMS ACTION=RELEASE FILE_SYSTEMS='"/usr/sap,/sapmnt,/oracle/EPP/sapdata4,/oracle/EPP/sapdata3,/oracle/EPP/sapdata2,/oracle/EPP/sapdata1,/oracle/EPP/origlogB,/oracle/EPP/origlogA,/oracle/EPP/oraarch,/oracle/EPP/mirrlogB,/oracle/EPP/mirrlogA,/oracle/EPP,/oracle,/board_org"' RESOURCE_GROUPS='"epprd_rg' '"' FSCHECK_TOOLS='""' RECOVERY_METHODS='"sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential"' +epprd_rg:process_resources[1] JOB_TYPE=FILESYSTEMS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] FILE_SYSTEMS=/usr/sap,/sapmnt,/oracle/EPP/sapdata4,/oracle/EPP/sapdata3,/oracle/EPP/sapdata2,/oracle/EPP/sapdata1,/oracle/EPP/origlogB,/oracle/EPP/origlogA,/oracle/EPP/oraarch,/oracle/EPP/mirrlogB,/oracle/EPP/mirrlogA,/oracle/EPP,/oracle,/board_org +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] FSCHECK_TOOLS='' +epprd_rg:process_resources[1] RECOVERY_METHODS=sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources[3360] [[ FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources[3482] process_file_systems RELEASE +epprd_rg:process_resources[process_file_systems:2640] PS4_FUNC=process_file_systems +epprd_rg:process_resources[process_file_systems:2640] typeset PS4_FUNC +epprd_rg:process_resources[process_file_systems:2641] [[ high == high ]] +epprd_rg:process_resources[process_file_systems:2641] set -x +epprd_rg:process_resources[process_file_systems:2643] STAT=0 +epprd_rg:process_resources[process_file_systems:2645] [[ RELEASE == ACQUIRE ]] 
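For the FILESYSTEMS release job, cl_deactivate_fs first derives a per-resource-group status file name, so that a failed unmount can be reported against the right group; the trace below shows the same steps for epprd_rg. A sketch of the convention:

    # Sketch: key the status file off the first resource group name.
    print $RESOURCE_GROUPS | cut -f 1 -d ' ' | read RES_GRP
    TMP_FILENAME=${RES_GRP}_deactivate_fs.tmp
    rm -f /tmp/$TMP_FILENAME        # clear stale status from any earlier run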
+epprd_rg:process_resources[process_file_systems:2667] cl_deactivate_fs +epprd_rg:cl_deactivate_fs[860] version=1.6 +epprd_rg:cl_deactivate_fs[863] STATUS=0 +epprd_rg:cl_deactivate_fs[863] typeset -li STATUS +epprd_rg:cl_deactivate_fs[864] SLEEP=1 +epprd_rg:cl_deactivate_fs[864] typeset -li SLEEP +epprd_rg:cl_deactivate_fs[865] LIMIT=60 +epprd_rg:cl_deactivate_fs[865] typeset -li LIMIT +epprd_rg:cl_deactivate_fs[866] export SLEEP +epprd_rg:cl_deactivate_fs[867] export LIMIT +epprd_rg:cl_deactivate_fs[868] TMP_FILENAME=_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[870] (( 0 != 0 )) +epprd_rg:cl_deactivate_fs[875] OEM_CALL=false +epprd_rg:cl_deactivate_fs[879] : Check here to see if the forced unmount option can be used +epprd_rg:cl_deactivate_fs[881] FORCE_OK='' +epprd_rg:cl_deactivate_fs[881] export FORCE_OK +epprd_rg:cl_deactivate_fs[882] O_FlAG='' +epprd_rg:cl_deactivate_fs[882] export O_FlAG +epprd_rg:cl_deactivate_fs[885] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_deactivate_fs[886] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_deactivate_fs[887] : 99.99.999.999 +epprd_rg:cl_deactivate_fs[889] typeset -li V R M F +epprd_rg:cl_deactivate_fs[890] typeset -Z2 R +epprd_rg:cl_deactivate_fs[891] typeset -Z3 M +epprd_rg:cl_deactivate_fs[892] typeset -Z3 F +epprd_rg:cl_deactivate_fs[893] jfs2_lvl=601002000 +epprd_rg:cl_deactivate_fs[893] typeset -li jfs2_lvl +epprd_rg:cl_deactivate_fs[894] fuser_lvl=601004000 +epprd_rg:cl_deactivate_fs[894] typeset -li fuser_lvl +epprd_rg:cl_deactivate_fs[895] VRMF=0 +epprd_rg:cl_deactivate_fs[895] typeset -li VRMF +epprd_rg:cl_deactivate_fs[898] : Here try and figure out what level of JFS2 is installed +epprd_rg:cl_deactivate_fs[900] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_deactivate_fs[900] cut -f3 -d: +epprd_rg:cl_deactivate_fs[900] read V R M F +epprd_rg:cl_deactivate_fs[900] IFS=. 
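The typeset attributes above are what make the dotted fileset level comparable as a single integer: R is zero-padded to two digits, M and F to three, so bos.rte.filesystem level 7.2.5.102 collapses to 702005102 on the next trace line. A sketch of the whole capability check (note the trace initializes O_FlAG, with a lowercase l, at line 882 but assigns O_FLAG here, so only the -O assignment takes effect):

    typeset -li V R M F VRMF
    typeset -Z2 R                  # pad release to 2 digits
    typeset -Z3 M F                # pad modification and fix to 3 digits
    lslpp -lcqOr bos.rte.filesystem | cut -f3 -d: | IFS=. read V R M F
    VRMF=$V$R$M$F                  # 7.2.5.102 -> 702005102
    (( VRMF >= 601002000 )) && FORCE_OK=true   # JFS2 supports forced unmount
    (( VRMF >= 601004000 )) && O_FLAG=-O       # fuser supports the -O flag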
+epprd_rg:cl_deactivate_fs[901] VRMF=702005102 +epprd_rg:cl_deactivate_fs[903] (( 702005102 >= 601002000 )) +epprd_rg:cl_deactivate_fs[906] : JFS2 at this level that supports forced unmount +epprd_rg:cl_deactivate_fs[908] FORCE_OK=true +epprd_rg:cl_deactivate_fs[911] (( 702005102 >= 601004000 )) +epprd_rg:cl_deactivate_fs[914] : fuser at this level supports the -O flag +epprd_rg:cl_deactivate_fs[916] O_FLAG=-O +epprd_rg:cl_deactivate_fs[920] : if JOB_TYPE is set and is not GROUP, then process_resources is parent +epprd_rg:cl_deactivate_fs[922] [[ FILESYSTEMS != 0 ]] +epprd_rg:cl_deactivate_fs[922] [[ FILESYSTEMS != GROUP ]] +epprd_rg:cl_deactivate_fs[923] deactivate_fs_process_resources +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:705] STATUS=0 +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:705] typeset -li STATUS +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:708] : for the temp file, just take the first rg name +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:710] print epprd_rg +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:710] cut -f 1 -d ' ' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:710] read RES_GRP +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:711] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:714] : Remove the status file if already exists +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:716] rm -f /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:719] : go through all resource groups +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:721] pid_list='' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:724] export GROUPNAME +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:725] export RECOVERY_METHOD +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:728] : Get a reverse sorted list of the filesystems in this RG so that they +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:729] : release in opposite order of mounting. This is needed for nested mounts. 
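The comment above states the ordering rule, and the next trace lines implement it by splitting the comma list onto separate lines and reverse-sorting it, so that /oracle/EPP/sapdata4 is released before /oracle/EPP and /oracle. Lexicographic reverse order suffices because a nested mount point always carries its parent's path as a prefix. A sketch:

    # Sketch: reverse-sorted unique mount points, deepest paths first,
    # so nested file systems are unmounted before their parents.
    LIST_OF_FILE_SYSTEMS_FOR_RG=$(print $FILE_SYSTEMS | tr ',' '\n' | sort -ru)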
+epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:731] print /usr/sap,/sapmnt,/oracle/EPP/sapdata4,/oracle/EPP/sapdata3,/oracle/EPP/sapdata2,/oracle/EPP/sapdata1,/oracle/EPP/origlogB,/oracle/EPP/origlogA,/oracle/EPP/oraarch,/oracle/EPP/mirrlogB,/oracle/EPP/mirrlogA,/oracle/EPP,/oracle,/board_org +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:731] read LIST_OF_FILE_SYSTEMS_FOR_RG FILE_SYSTEMS +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:731] IFS=: +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:732] print /usr/sap,/sapmnt,/oracle/EPP/sapdata4,/oracle/EPP/sapdata3,/oracle/EPP/sapdata2,/oracle/EPP/sapdata1,/oracle/EPP/origlogB,/oracle/EPP/origlogA,/oracle/EPP/oraarch,/oracle/EPP/mirrlogB,/oracle/EPP/mirrlogA,/oracle/EPP,/oracle,/board_org +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:732] sort -ru +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:732] tr , '\n' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:732] LIST_OF_FILE_SYSTEMS_FOR_RG=$'/usr/sap\n/sapmnt\n/oracle/EPP/sapdata4\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata1\n/oracle/EPP/origlogB\n/oracle/EPP/origlogA\n/oracle/EPP/oraarch\n/oracle/EPP/mirrlogB\n/oracle/EPP/mirrlogA\n/oracle/EPP\n/oracle\n/board_org' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:733] find_nested_mounts $'/usr/sap\n/sapmnt\n/oracle/EPP/sapdata4\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata1\n/oracle/EPP/origlogB\n/oracle/EPP/origlogA\n/oracle/EPP/oraarch\n/oracle/EPP/mirrlogB\n/oracle/EPP/mirrlogA\n/oracle/EPP\n/oracle\n/board_org' +epprd_rg:cl_deactivate_fs[find_nested_mounts:88] given_fs_list=$'/usr/sap\n/sapmnt\n/oracle/EPP/sapdata4\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata1\n/oracle/EPP/origlogB\n/oracle/EPP/origlogA\n/oracle/EPP/oraarch\n/oracle/EPP/mirrlogB\n/oracle/EPP/mirrlogA\n/oracle/EPP\n/oracle\n/board_org' +epprd_rg:cl_deactivate_fs[find_nested_mounts:88] typeset given_fs_list +epprd_rg:cl_deactivate_fs[find_nested_mounts:90] typeset first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:91] mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:91] mount_out=$' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 
rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 18:16 bg,soft,intr,sec=sys,rw\nepprd /board_org /board nfs3 Jan 28 18:18 ' +epprd_rg:cl_deactivate_fs[find_nested_mounts:91] typeset mount_out +epprd_rg:cl_deactivate_fs[find_nested_mounts:92] discovered_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:92] typeset discovered_fs +epprd_rg:cl_deactivate_fs[find_nested_mounts:93] typeset line fs nested_fs +epprd_rg:cl_deactivate_fs[find_nested_mounts:94] typeset mounted_fs_list +epprd_rg:cl_deactivate_fs[find_nested_mounts:96] fs_count=0 +epprd_rg:cl_deactivate_fs[find_nested_mounts:96] typeset -li fs_count +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 18:16 bg,soft,intr,sec=sys,rw\nepprd /board_org /board nfs3 Jan 28 18:18 ' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /usr/sap +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=$' /dev/saplv /usr/sap jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 18:16 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- $' /dev/saplv /usr/sap jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 18:16 
bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 2' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 2 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] echo $' /dev/saplv /usr/sap jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 18:16 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/saplv /usr/sap jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /usr/sap == /usr/sap/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:128] [[ jfs2 == /usr/sap/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n '' ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print 'epdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 18:16 bg,soft,intr,sec=sys,rw' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /usr/sap/trans == /usr/sap/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /usr/sap/trans == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:128] [[ /usr/sap/trans == /usr/sap/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:128] [[ nfs3 == nfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:131] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:132] : exporting_node exported_file_system lower_mount_point vfs +epprd_rg:cl_deactivate_fs[find_nested_mounts:133] : epdev /usr/sap/trans /usr/sap/trans nfs3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:135] nested_fs=/usr/sap/trans +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /usr/sap/trans ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /usr/sap/trans +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n 
/dev/boardlv /board_org jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 18:16 bg,soft,intr,sec=sys,rw\nepprd /board_org /board nfs3 Jan 28 18:18 ' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /sapmnt +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/sapmntlv /sapmnt jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/sapmntlv /sapmnt jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:07
rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 18:16 bg,soft,intr,sec=sys,rw\nepprd /board_org /board nfs3 Jan 28 18:18 ' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 18:16 bg,soft,intr,sec=sys,rw\nepprd /board_org /board nfs3 Jan 28 18:18 ' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n 
/dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 18:16 bg,soft,intr,sec=sys,rw\nepprd /board_org /board nfs3 Jan 28 18:18 ' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv 
/oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 18:16 bg,soft,intr,sec=sys,rw\nepprd /board_org /board nfs3 Jan 28 18:18 ' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 18:16 bg,soft,intr,sec=sys,rw\nepprd /board_org /board nfs3 Jan 28 18:18 ' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' 
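Each grep -w pass above pulls the mount lines that mention one managed mount point; when more than one line comes back, the extras are parsed for nested mounts. mount output has two shapes: local mounts carry the mount point in field 2 and the vfs in field 3, while NFS mounts lead with the exporting node, putting the mount point in field 3 and the vfs in field 4, which is how /usr/sap/trans was discovered under /usr/sap earlier. A sketch of the per-line classification, assuming fs holds one managed mount point (the traced code greps a single captured copy of the mount output instead of re-running mount):

    # Sketch: collect mounts nested below $fs from mount output.
    discovered_fs=''
    mount | grep -w $fs | while read first second third fourth rest ; do
        nested_fs=''
        if [[ $second == ${fs}/* && $third == jfs* ]] ; then
            nested_fs=$second           # local JFS/JFS2 mount below $fs
        elif [[ $third == ${fs}/* && $fourth == nfs* ]] ; then
            nested_fs=$third            # NFS mount below $fs
        fi
        [[ -n $nested_fs ]] && discovered_fs="$discovered_fs $nested_fs"
    done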
+epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 18:16 bg,soft,intr,sec=sys,rw\nepprd /board_org /board nfs3 Jan 28 18:18 ' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n 
/dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 18:16 bg,soft,intr,sec=sys,rw\nepprd /board_org /board nfs3 Jan 28 18:18 ' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n 
/dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 18:16 bg,soft,intr,sec=sys,rw\nepprd /board_org /board nfs3 Jan 28 18:18 ' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 18:16 bg,soft,intr,sec=sys,rw\nepprd /board_org /board nfs3 Jan 28 18:18 ' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options 
\n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 18:16 bg,soft,intr,sec=sys,rw\nepprd /board_org /board nfs3 Jan 28 18:18 ' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=$' /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- $' /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 
18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 10' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 10 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] echo $' /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:128] [[ jfs2 == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n '' ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/mirrlogA == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
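The iterations above all follow one pattern: for each file system belonging to the resource group, cl_deactivate_fs greps the saved mount table for that mount point with grep -w, then counts the matching lines with wc -l. A count of 1 means only the file system itself matched, so there is nothing nested to record and the (( fs_count > 1 )) test falls through. A minimal ksh sketch of that gate, reusing the variable names seen in the trace (scan_nested is a hypothetical helper; its body is shown in the next sketch):

    # Keep only the mount-table lines that mention this mount point.
    mounted_fs_list=$(mount | grep -w "$FS")
    fs_count=$(print -- "$mounted_fs_list" | wc -l)
    if (( fs_count > 1 )) ; then
        scan_nested "$mounted_fs_list"    # hypothetical helper; see below
    fi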
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/mirrlogAlv /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/mirrlogA ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/mirrlogB == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/mirrlogBlv /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/mirrlogB ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/oraarch == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/oraarchlv /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/oraarch ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/origlogA == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/origlogAlv /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/origlogA ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/origlogB == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/origlogBlv /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/origlogB ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata1 == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata1lv /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata1 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata2 == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
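When more than one line matches, each line is classified into one of the two forms the inline comments describe: a local mount, where the mount point is field two and the vfs field starts with jfs, or an NFS mount, where the serving node occupies field one and the mount point is field three. Either way, a mount point that falls strictly below the file system being released is recorded as nested. A minimal ksh sketch of the classification, assuming $FS holds the parent mount point and reusing the field names from the trace:

    print -- "$mounted_fs_list" | while read first second third fourth rest ; do
        nested_fs=''
        if [[ $second == ${FS}/* && $third == jfs* ]] ; then
            nested_fs=$second     # local: /dev/lvname /mount/point jfs2 ...
        elif [[ $third == ${FS}/* ]] ; then
            nested_fs=$third      # NFS:   node /exported/fs /mount/point nfs3 ...
        fi
        [[ -n $nested_fs ]] && discovered_fs="$discovered_fs $nested_fs"
    done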
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata2lv /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata2 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata3 == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata3lv /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata3 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata4 == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata4lv /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata4 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 18:16 bg,soft,intr,sec=sys,rw\nepprd /board_org /board nfs3 Jan 28 18:18 ' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=$' /dev/oraclelv /oracle jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv 
/oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- $' /dev/oraclelv /oracle jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 11' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 11 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] echo $' /dev/oraclelv /oracle jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/oraclelv /oracle jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:128] [[ jfs2 == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n '' ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP == /oracle/* ]] 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/epplv /oracle/EPP +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/mirrlogA == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/mirrlogAlv /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/mirrlogA ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/mirrlogB == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/mirrlogBlv /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/mirrlogB ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/oraarch == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/oraarchlv /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/oraarch ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/origlogA == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/origlogAlv /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/origlogA ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/origlogB == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/origlogBlv /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/origlogB ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata1 == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata1lv /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata1 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata2 == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata2lv /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata2 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata3 == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata3lv /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata3 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata4 == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata4lv /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata4 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /board_org +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraclelv 
/oracle jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 18:07 rw,log=/dev/epprdaloglv\nepdev /usr/sap/trans /usr/sap/trans nfs3 Jan 28 18:16 bg,soft,intr,sec=sys,rw\nepprd /board_org /board nfs3 Jan 28 18:18 ' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=$' /dev/boardlv /board_org jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\nepprd /board_org /board nfs3 Jan 28 18:18 ' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- $' /dev/boardlv /board_org jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\nepprd /board_org /board nfs3 Jan 28 18:18 ' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 2' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 2 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] echo $' /dev/boardlv /board_org jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv\nepprd /board_org /board nfs3 Jan 28 18:18 ' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/boardlv /board_org jfs2 Jan 28 18:06 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /board_org == /board_org/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:128] [[ jfs2 == /board_org/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n '' ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print 'epprd /board_org /board nfs3 Jan 28 18:18' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /board_org == /board_org/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:128] [[ /board == /board_org/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n '' ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:150] : Pass comprehensive list to stdout, sorted to get correct unmount order +epprd_rg:cl_deactivate_fs[find_nested_mounts:152] 
print -- $'/usr/sap\n/sapmnt\n/oracle/EPP/sapdata4\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata1\n/oracle/EPP/origlogB\n/oracle/EPP/origlogA\n/oracle/EPP/oraarch\n/oracle/EPP/mirrlogB\n/oracle/EPP/mirrlogA\n/oracle/EPP\n/oracle\n/board_org' ' /usr/sap/trans /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[find_nested_mounts:152] sort -ru +epprd_rg:cl_deactivate_fs[find_nested_mounts:152] tr ' ' '\n' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:733] LIST_OF_FILE_SYSTEMS_FOR_RG=$'/usr/sap/trans\n/usr/sap\n/sapmnt\n/oracle/EPP/sapdata4\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata1\n/oracle/EPP/origlogB\n/oracle/EPP/origlogA\n/oracle/EPP/oraarch\n/oracle/EPP/mirrlogB\n/oracle/EPP/mirrlogA\n/oracle/EPP\n/oracle\n/board_org' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:736] : Get the recovery method used for all filesystems in this resource group +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:738] print sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:738] read RECOVERY_METHOD RECOVERY_METHODS +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:738] IFS=: +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:739] print sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:739] cut -f 1 -d , +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:739] RECOVERY_METHOD=sequential +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:742] : verify the recovery method +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:744] RECOVERY_METHOD=sequential +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:745] RECOVERY_METHOD=sequential +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:747] [[ sequential != sequential ]] +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:754] : Tell the cluster manager what we are going to do +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:756] ALLFS=All_filesystems +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:757] cl_RMupdate resource_releasing All_filesystems cl_deactivate_fs 2023-01-28T19:42:33.144686 2023-01-28T19:42:33.149293 +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:760] : now that all variables are set, perform the umounts +epprd_rg:cl_deactivate_fs:/usr/sap/trans[deactivate_fs_process_resources:764] PS4_LOOP=/usr/sap/trans +epprd_rg:cl_deactivate_fs:/usr/sap/trans[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/usr/sap/trans[deactivate_fs_process_resources:770] fs_umount /usr/sap/trans cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.327)[fs_umount:313] FS=/usr/sap/trans +epprd_rg:cl_deactivate_fs(0.327)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(0.327)[fs_umount:314] PROGNAME=cl_deactivate_fs 
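The flattening and sort above are what turn the raw discoveries into a safe unmount order: tr puts one mount point per line, the -u of sort drops the duplicates collected when both a parent (/oracle) and its children matched the scan, and the reverse (-r) lexical order guarantees that a child such as /oracle/EPP/sapdata4 sorts before /oracle/EPP, which sorts before /oracle, so lower file systems are always unmounted first. The pipeline, with $rg_fs_list standing in (assumed name) for the resource group's own newline-separated file system list:

    print -- "$rg_fs_list $discovered_fs" | tr ' ' '\n' | sort -ru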
+epprd_rg:cl_deactivate_fs(0.327)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(0.328)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.328)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(0.328)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(0.348)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(0.348)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(0.348)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(0.348)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(0.348)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(0.348)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(0.348)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(0.348)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(0.348)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(0.348)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(0.348)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(0.348)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(0.348)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(0.349)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(0.349)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(0.349)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(0.349)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(0.350)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/usr/sap/trans +epprd_rg:cl_deactivate_fs(0.350)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(0.355)[fs_umount:332] 
fs_type=nfs3 +epprd_rg:cl_deactivate_fs(0.355)[fs_umount:333] [[ nfs3 == nfs* ]] +epprd_rg:cl_deactivate_fs(0.355)[fs_umount:336] : unmount nfs filesystem +epprd_rg:cl_deactivate_fs(0.355)[fs_umount:338] umount /usr/sap/trans +epprd_rg:cl_deactivate_fs(0.362)[fs_umount:358] : append status to the status file +epprd_rg:cl_deactivate_fs(0.362)[fs_umount:360] print -- 0 /usr/sap/trans +epprd_rg:cl_deactivate_fs(0.362)[fs_umount:360] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.362)[fs_umount:361] return 0 +epprd_rg:cl_deactivate_fs:/usr/sap[deactivate_fs_process_resources:764] PS4_LOOP=/usr/sap +epprd_rg:cl_deactivate_fs:/usr/sap[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/usr/sap[deactivate_fs_process_resources:770] fs_umount /usr/sap cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.362)[fs_umount:313] FS=/usr/sap +epprd_rg:cl_deactivate_fs(0.362)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(0.362)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(0.362)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(0.363)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.363)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(0.363)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(0.383)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(0.383)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(0.383)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(0.383)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(0.383)[fs_umount:318] typeset lv 
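fs_umount handles NFS mounts first, since they can be unmounted without touching any LVM state. The detection leans on the column layout of AIX mount output: for an NFS entry the serving node occupies column 1, which pushes the local mount point to column 3 and the vfs type to column 4, so matching both columns selects only NFS entries — exactly the awk call seen in the trace:

    # Prints the vfs type (nfs3 here) when the path is NFS-mounted,
    # and nothing at all when it is a local jfs2 mount.
    fs_type=$(mount | awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/usr/sap/trans)
    [[ $fs_type == nfs* ]] && umount /usr/sap/trans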
+epprd_rg:cl_deactivate_fs:/usr/sap[deactivate_fs_process_resources:764] PS4_LOOP=/usr/sap
+epprd_rg:cl_deactivate_fs:/usr/sap[deactivate_fs_process_resources:765] [[ sequential == parallel ]]
+epprd_rg:cl_deactivate_fs:/usr/sap[deactivate_fs_process_resources:770] fs_umount /usr/sap cl_deactivate_fs epprd_rg_deactivate_fs.tmp
+epprd_rg:cl_deactivate_fs(0.362)[fs_umount:313] FS=/usr/sap
+epprd_rg:cl_deactivate_fs(0.362)[fs_umount:313] typeset FS
+epprd_rg:cl_deactivate_fs(0.362)[fs_umount:314] PROGNAME=cl_deactivate_fs
+epprd_rg:cl_deactivate_fs(0.362)[fs_umount:314] typeset PROGNAME
+epprd_rg:cl_deactivate_fs(0.363)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp
+epprd_rg:cl_deactivate_fs(0.363)[fs_umount:315] typeset TMP_FILENAME
+epprd_rg:cl_deactivate_fs(0.363)[fs_umount:316] clwparroot epprd_rg
+epprd_rg:clwparroot[42] [[ high == high ]]
+epprd_rg:clwparroot[42] version=1.1
+epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils
+epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs
+epprd_rg:clwparroot[26] [[ high == high ]]
+epprd_rg:clwparroot[26] set -x
+epprd_rg:clwparroot[27] [[ high == high ]]
+epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$'
+epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin
+epprd_rg:clwparroot[30] export PATH
+epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr
+epprd_rg:clwparroot[34] usageErr=10
+epprd_rg:clwparroot[35] invalArgErr=11
+epprd_rg:clwparroot[36] internalErr=12
+epprd_rg:clwparroot[46] rgName=epprd_rg
+epprd_rg:clwparroot[49] uname
+epprd_rg:clwparroot[49] OSNAME=AIX
+epprd_rg:clwparroot[51] [[ AIX == *AIX* ]]
+epprd_rg:clwparroot[52] lslpp -l bos.wpars
+epprd_rg:clwparroot[52] 1> /dev/null 2>& 1
+epprd_rg:clwparroot[54] loadWparName epprd_rg
+epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]]
+epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource
+epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]]
+epprd_rg:clwparroot[loadWparName:1490] return 0
+epprd_rg:clwparroot[54] wparName=''
+epprd_rg:clwparroot[55] (( 0 != 0 ))
+epprd_rg:clwparroot[55] [[ -z '' ]]
+epprd_rg:clwparroot[57] exit 0
+epprd_rg:cl_deactivate_fs(0.383)[fs_umount:316] WPAR_ROOT=''
+epprd_rg:cl_deactivate_fs(0.383)[fs_umount:316] typeset WPAR_ROOT
+epprd_rg:cl_deactivate_fs(0.383)[fs_umount:317] STATUS=0
+epprd_rg:cl_deactivate_fs(0.383)[fs_umount:317] typeset -li STATUS
+epprd_rg:cl_deactivate_fs(0.383)[fs_umount:318] typeset lv
+epprd_rg:cl_deactivate_fs(0.383)[fs_umount:319] typeset fs_type
+epprd_rg:cl_deactivate_fs(0.383)[fs_umount:320] typeset count
+epprd_rg:cl_deactivate_fs(0.383)[fs_umount:321] typeset line
+epprd_rg:cl_deactivate_fs(0.383)[fs_umount:322] RC=0
+epprd_rg:cl_deactivate_fs(0.383)[fs_umount:322] typeset -li RC
+epprd_rg:cl_deactivate_fs(0.383)[fs_umount:323] typeset pid
+epprd_rg:cl_deactivate_fs(0.383)[fs_umount:324] typeset pidlist
+epprd_rg:cl_deactivate_fs(0.383)[fs_umount:325] typeset lv_lsfs
+epprd_rg:cl_deactivate_fs(0.383)[fs_umount:326] disable_procfile_debug=false
+epprd_rg:cl_deactivate_fs(0.383)[fs_umount:326] typeset disable_procfile_debug
+epprd_rg:cl_deactivate_fs(0.383)[fs_umount:327] typeset crossmount_rg
+epprd_rg:cl_deactivate_fs(0.383)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem
+epprd_rg:cl_deactivate_fs(0.385)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/usr/sap
+epprd_rg:cl_deactivate_fs(0.385)[fs_umount:332] mount
+epprd_rg:cl_deactivate_fs(0.389)[fs_umount:332] fs_type=''
+epprd_rg:cl_deactivate_fs(0.389)[fs_umount:333] [[ '' == nfs* ]]
+epprd_rg:cl_deactivate_fs(0.389)[fs_umount:365] : Get the logical volume associated with the filesystem
+epprd_rg:cl_deactivate_fs(0.389)[fs_umount:367] lsfs -c /usr/sap
+epprd_rg:cl_deactivate_fs(0.393)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/usr/sap:/dev/saplv:jfs2:::104857600:rw:no:no'
+epprd_rg:cl_deactivate_fs(0.393)[fs_umount:382] : Get the logical volume name and filesystem type
+epprd_rg:cl_deactivate_fs(0.394)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/usr/sap:/dev/saplv:jfs2:::104857600:rw:no:no'
+epprd_rg:cl_deactivate_fs(0.395)[fs_umount:384] tail -1
+epprd_rg:cl_deactivate_fs(0.394)[fs_umount:384] read skip lv fs_type rest
+epprd_rg:cl_deactivate_fs(0.395)[fs_umount:384] IFS=:
+epprd_rg:cl_deactivate_fs(0.397)[fs_umount:387] : For WPARs, find the real file system name
+epprd_rg:cl_deactivate_fs(0.397)[fs_umount:389] [[ -n '' ]]
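For a local filesystem the trace instead falls through to lsfs -c, whose colon-delimited output names the device and vfs type behind the mount point; the tail -1 and IFS=: read above peel off the header line and split the data record. The same parse in isolation (variable names follow the trace; the trailing print is just a check):

    # lsfs -c emits a '#MountPoint:Device:Vfs:...' header, then one record.
    lsfs -c /usr/sap | tail -1 | IFS=: read skip lv fs_type rest

    # Expect lv=/dev/saplv and fs_type=jfs2 for this cluster.
    print "device=$lv vfs=$fs_type"

In ksh the last element of a pipeline runs in the current shell, so the variables set by read stay visible afterwards; the script relies on that same behavior.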
+epprd_rg:cl_deactivate_fs(0.397)[fs_umount:392] : Check to see if filesystem is mounted.
+epprd_rg:cl_deactivate_fs(0.398)[fs_umount:394] awk '{ if ( $1 == "/dev/saplv" ) print $2 }'
+epprd_rg:cl_deactivate_fs(0.398)[fs_umount:394] mount
+epprd_rg:cl_deactivate_fs(0.399)[fs_umount:394] LC_ALL=C
+epprd_rg:cl_deactivate_fs(0.403)[fs_umount:394] FS_MOUNTED=/usr/sap
+epprd_rg:cl_deactivate_fs(0.403)[fs_umount:395] [[ -n /usr/sap ]]
+epprd_rg:cl_deactivate_fs(0.403)[fs_umount:397] [[ /usr/sap != /usr/sap ]]
+epprd_rg:cl_deactivate_fs(0.403)[fs_umount:409] [[ /usr/sap == / ]]
+epprd_rg:cl_deactivate_fs(0.403)[fs_umount:409] [[ /usr/sap == /usr ]]
+epprd_rg:cl_deactivate_fs(0.403)[fs_umount:409] [[ /usr/sap == /dev ]]
+epprd_rg:cl_deactivate_fs(0.403)[fs_umount:409] [[ /usr/sap == /proc ]]
+epprd_rg:cl_deactivate_fs(0.403)[fs_umount:409] [[ /usr/sap == /var ]]
+epprd_rg:cl_deactivate_fs(0.403)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/usr/sap'
+epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log
+epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1
+epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime
+epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:33.254434
+epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:33.254434|INFO: Deactivating Filesystem|/usr/sap'
+epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
+epprd_rg:cl_deactivate_fs(0.432)[fs_umount:427] : Try up to 60 times to unmount /usr/sap
+epprd_rg:cl_deactivate_fs(0.432)[fs_umount:429] (( count=1))
+epprd_rg:cl_deactivate_fs(0.432)[fs_umount:429] (( count <= 60))
+epprd_rg:cl_deactivate_fs(0.432)[fs_umount:432] date '+%h %d %H:%M:%S.000'
+epprd_rg:cl_deactivate_fs(0.435)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 19:42:33.000
+epprd_rg:cl_deactivate_fs(0.435)[fs_umount:434] umount /usr/sap
umount: error unmounting /dev/saplv: Device busy
+epprd_rg:cl_deactivate_fs(1.351)[fs_umount:442] : At this point, unmount of /usr/sap has not worked. Attempt a SIGKILL to
+epprd_rg:cl_deactivate_fs(1.351)[fs_umount:443] : all processes having open file descriptors on this LV and FS.
+epprd_rg:cl_deactivate_fs(1.351)[fs_umount:445] date '+%h %d %H:%M:%S.000'
Jan 28 19:42:34.000
+epprd_rg:cl_deactivate_fs(1.354)[fs_umount:453] clodmget -n -q name=MOUNT_FILESYSTEM -f group HACMPresource
+epprd_rg:cl_deactivate_fs(1.357)[fs_umount:453] crossmount_rg=epprd_rg
+epprd_rg:cl_deactivate_fs(1.357)[fs_umount:457] clodmget -n -f value -q group='epprd_rg and name=FS_BEFORE_IPADDR' HACMPresource
+epprd_rg:cl_deactivate_fs(1.361)[fs_umount:457] [[ false == true ]]
+epprd_rg:cl_deactivate_fs(1.362)[fs_umount:468] mount
+epprd_rg:cl_deactivate_fs(1.362)[fs_umount:468] LC_ALL=C
+epprd_rg:cl_deactivate_fs(1.362)[fs_umount:468] awk '$4~ /nfs/ { print $4} '
+epprd_rg:cl_deactivate_fs(1.364)[fs_umount:468] grep -iq nfs
+epprd_rg:cl_deactivate_fs(1.367)[fs_umount:469] (( 0 == 0 ))
+epprd_rg:cl_deactivate_fs(1.367)[fs_umount:470] disable_procfile_debug=true
+epprd_rg:cl_deactivate_fs(1.367)[fs_umount:475] : Record the open files on /dev/saplv and /usr/sap, and the processes that we are
+epprd_rg:cl_deactivate_fs(1.367)[fs_umount:476] : about to kill.
+epprd_rg:cl_deactivate_fs(1.367)[fs_umount:478] fuser -O -u -x /dev/saplv
+epprd_rg:cl_deactivate_fs(1.368)[fs_umount:478] 2> /dev/null
+epprd_rg:cl_deactivate_fs(1.387)[fs_umount:478] pidlist=' 18088446 18612598 19464692 20709668 24117562 26870066'
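The first unmount attempt fails with Device busy, so the script gathers evidence before doing anything destructive: fuser -u lists every PID, with owning user, that holds the logical volume open, and the list is kept for the kill step. A reduced sketch of that collection and the per-process logging (flags exactly as in this trace; fuser option support varies by AIX level):

    # PIDs with open files on the LV; fuser's per-file chatter goes to stderr.
    pidlist=$(fuser -O -u -x /dev/saplv 2> /dev/null)

    # Snapshot each blocking process, environment included, before it is killed.
    for pid in $pidlist
    do
        ps ewwww $pid
    done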
+epprd_rg:cl_deactivate_fs(1.387)[fs_umount:482] : Process 18088446 has open files on /usr/sap. Record information about this
+epprd_rg:cl_deactivate_fs(1.387)[fs_umount:483] : process in case anyone is later surprised by this action.
+epprd_rg:cl_deactivate_fs(1.387)[fs_umount:485] ps ewwww 18088446
 PID TTY STAT TIME COMMAND
18088446 - A 0:00 /usr/sap/EPP/J00/exe/sapstartsrv pf=/usr/sap/EPP/SYS/profile/EPP_J00_epprd -D _=/usr/sap/EPP/J00/exe/sapstartsrv LANG=en_US THREAD=NOPS LOGIN=eppadm CLCMD_PASSTHRU=1 PATH=/oracle/EPP/112_64/bin:/usr/bin:/etc:/usr/sbin:/usr/ucb:/home/eppadm/bin:/usr/bin/X11:/sbin:.:/usr/sap/EPP/SYS/exe/uc/rs6000_64:/usr/sap/EPP/SYS/exe/run:/home/eppadm NLS_LANG=AMERICAN_AMERICA.UTF8 EXTENDED_HISTORY=ON RSEC_SSFS_KEYPATH=/usr/sap/EPP/SYS/global/security/rsecssfs/key ORACLE_BASE=/oracle LC__FASTMSG=true LOGNAME=eppadm MAIL=/var/spool/mail/eppadm dbs_ora_tnsname=EPP ORACLE_SID=EPP LOCPATH=/usr/lib/nls/loc DB_SID=EPP DIR_LIBRARY=/usr/sap/EPP/J00/exe USER=eppadm AUTHSTATE=files TNS_ADMIN=/usr/sap/EPP/SYS/profile/oracle IBM_JAVA_OPTIONS=-Xmx256M -Xj9 SAPEXE=/usr/sap/EPP/SYS/exe/run SHELL=/bin/csh ODMDIR=/etc/objrepos CLUSTER_OVERRIDE=yes HISTSIZE=10000 SAPDATA_HOME=/oracle/EPP RSEC_SSFS_DATAPATH=/usr/sap/EPP/SYS/global/security/rsecssfs/data HOME=/home/eppadm TERM=vt100 MAILMSG=[YOU HAVE NEW MAIL] ORACLE_HOME=/oracle/EPP/112_64 rsdb_ssfs_connect=0 PWD=/usr/sap/EPP/SYS/profile TZ=KORST-9 dbms_type=ORA SAPSYSTEMNAME=EPP NLSPATH=/usr/lib/nls/msg/%L/%N:/usr/lib/nls/msg/%L/%N.cat:/usr/lib/nls/msg/%l.%c/%N:/usr/lib/nls/msg/%l.%c/%N.cat LIBPATH=/usr/sap/EPP/J00/exe:/usr/sap/EPP/J00/exe:/usr/sap/EPP/J00/exe:/usr/sap/EPP/SYS/exe/run:/usr/sap/EPP/SYS/exe/uc/rs6000_64:/oracle/client/11x_64/instantclient
+epprd_rg:cl_deactivate_fs(1.394)[fs_umount:486] [[ true != true ]]
+epprd_rg:cl_deactivate_fs(1.394)[fs_umount:482] : Process 18612598 has open files on /usr/sap. Record information about this
+epprd_rg:cl_deactivate_fs(1.394)[fs_umount:483] : process in case anyone is later surprised by this action.
+epprd_rg:cl_deactivate_fs(1.394)[fs_umount:485] ps ewwww 18612598
 PID TTY STAT TIME COMMAND
18612598 - A 0:00 /usr/sap/hostctrl/exe/sapstartsrv pf=/usr/sap/hostctrl/exe/host_profile -D _=/usr/sap/hostctrl/exe/hostexecstart LANG=en_US THREAD=NOPS LOGIN=eppadm CLCMD_PASSTHRU=1 PATH=/usr/bin:/bin:/usr/sbin:/sbin:/oracle/EPP/112_64/bin:/etc:/usr/ucb:/home/eppadm/bin:/usr/bin/X11:.:/usr/sap/EPP/SYS/exe/uc/rs6000_64:/usr/sap/EPP/SYS/exe/run:/home/eppadm: NLS_LANG=AMERICAN_AMERICA.UTF8 EXTENDED_HISTORY=ON RSEC_SSFS_KEYPATH=/usr/sap/EPP/SYS/global/security/rsecssfs/key ORACLE_BASE=/oracle LC__FASTMSG=true LOGNAME=sapadm MAIL=/var/spool/mail/eppadm dbs_ora_tnsname=EPP ORACLE_SID=EPP LOCPATH=/usr/lib/nls/loc DB_SID=EPP DIR_LIBRARY=/usr/sap/EPP/SYS/exe/run USER=sapadm AUTHSTATE=files TNS_ADMIN=/usr/sap/EPP/SYS/profile/oracle IBM_JAVA_OPTIONS=-Xmx256M -Xj9 SAPEXE=/usr/sap/EPP/SYS/exe/run SHELL=/bin/csh ODMDIR=/etc/objrepos CLUSTER_OVERRIDE=yes HISTSIZE=10000 SAPDATA_HOME=/oracle/EPP RSEC_SSFS_DATAPATH=/usr/sap/EPP/SYS/global/security/rsecssfs/data HOME=/home/sapadm TERM=vt100 MAILMSG=[YOU HAVE NEW MAIL] ORACLE_HOME=/oracle/EPP/112_64 rsdb_ssfs_connect=0 PWD=/home/eppadm TZ=KORST-9 dbms_type=ORA SAPSYSTEMNAME=EPP NODNSSAPTRANSHOST=1 USERNAME=sapadm LIBPATH=/usr/sap/hostctrl/exe
+epprd_rg:cl_deactivate_fs(1.400)[fs_umount:486] [[ true != true ]]
+epprd_rg:cl_deactivate_fs(1.401)[fs_umount:482] : Process 19464692 has open files on /usr/sap. Record information about this
+epprd_rg:cl_deactivate_fs(1.401)[fs_umount:483] : process in case anyone is later surprised by this action.
+epprd_rg:cl_deactivate_fs(1.401)[fs_umount:485] ps ewwww 19464692
 PID TTY STAT TIME COMMAND
19464692 - A 0:00 /usr/sap/DAA/SMDA97/exe/sapstartsrv pf=/usr/sap/DAA/SYS/profile/DAA_SMDA97_epprd -D _=/usr/sap/DAA/SMDA97/exe/sapstartsrv LANG=en_US LOGIN=daaadm CLCMD_PASSTHRU=1 PATH=/usr/bin:/etc:/usr/sbin:/usr/ucb:/usr/bin/X11:/sbin:/usr/java8_64/jre/bin:/usr/java8_64/bin:/usr/sap/DAA/SYS/exe/uc/rs6000_64:/usr/sap/DAA/SYS/exe/run:/home/daaadm:. EXTENDED_HISTORY=ON RSEC_SSFS_KEYPATH=/usr/sap/DAA/SYS/global/security/rsecssfs/key LC__FASTMSG=true LOGNAME=daaadm LOCPATH=/usr/lib/nls/loc DIR_LIBRARY=/usr/sap/DAA/SMDA97/exe USER=daaadm AUTHSTATE=files IBM_JAVA_OPTIONS=-Xmx256M -Xj9 SHELL=/bin/csh ODMDIR=/etc/objrepos CLUSTER_OVERRIDE=yes HISTSIZE=10000 RSEC_SSFS_DATAPATH=/usr/sap/DAA/SYS/global/security/rsecssfs/data HOME=/home/daaadm TERM=vt100 rsdb_ssfs_connect=0 PWD=/usr/sap/DAA/SYS/profile TZ=KORST-9 SAPSYSTEMNAME=DAA NLSPATH=/usr/lib/nls/msg/%L/%N:/usr/lib/nls/msg/%L/%N.cat:/usr/lib/nls/msg/%l.%c/%N:/usr/lib/nls/msg/%l.%c/%N.cat LIBPATH=/usr/sap/DAA/SMDA97/exe:/usr/sap/DAA/SMDA97/exe:/usr/sap/DAA/SMDA97/exe:/usr/sap/DAA/SYS/exe/run:/usr/sap/DAA/SYS/exe/uc/rs6000_64
+epprd_rg:cl_deactivate_fs(1.407)[fs_umount:486] [[ true != true ]]
+epprd_rg:cl_deactivate_fs(1.407)[fs_umount:482] : Process 20709668 has open files on /usr/sap. Record information about this
+epprd_rg:cl_deactivate_fs(1.407)[fs_umount:483] : process in case anyone is later surprised by this action.
+epprd_rg:cl_deactivate_fs(1.407)[fs_umount:485] ps ewwww 20709668
 PID TTY STAT TIME COMMAND
20709668 pts/1 A 0:00 -ksh USER=root LOGNAME=root LOGIN=root HOME=/ PATH=/usr/bin:/etc:/usr/sbin:/usr/ucb:/usr/bin/X11:/sbin:/usr/java8_64/jre/bin:/usr/java8_64/bin MAIL=/var/spool/mail/root SHELL=/usr/bin/ksh TZ=KORST-9 TERM=xterm-256color AUTHSTATE=compat LANG=en_US LOCPATH=/usr/lib/nls/loc LC__FASTMSG=true EXTENDED_HISTORY=ON HISTSIZE=10000 ODMDIR=/etc/objrepos CLCMD_PASSTHRU=1 CLUSTER_OVERRIDE=yes SSH_CLIENT=192.168.240.153 55455 22 SSH_CONNECTION=192.168.240.153 55455 61.81.244.134 22 SSH_TTY=/dev/pts/1 NLSPATH=/usr/lib/nls/msg/%L/%N:/usr/lib/nls/msg/%L/%N.cat:/usr/lib/nls/msg/%l.%c/%N:/usr/lib/nls/msg/%l.%c/%N.cat
+epprd_rg:cl_deactivate_fs(1.415)[fs_umount:486] [[ true != true ]]
+epprd_rg:cl_deactivate_fs(1.415)[fs_umount:482] : Process 24117562 has open files on /usr/sap. Record information about this
+epprd_rg:cl_deactivate_fs(1.415)[fs_umount:483] : process in case anyone is later surprised by this action.
+epprd_rg:cl_deactivate_fs(1.415)[fs_umount:485] ps ewwww 24117562
 PID TTY STAT TIME COMMAND
24117562 - A 0:00 /usr/sap/hostctrl/exe/saphostexec pf=/usr/sap/hostctrl/exe/host_profile _=/usr/sap/hostctrl/exe/hostexecstart LANG=en_US THREAD=NOPS LOGIN=eppadm CLCMD_PASSTHRU=1 PATH=/usr/bin:/bin:/usr/sbin:/sbin:/oracle/EPP/112_64/bin:/etc:/usr/ucb:/home/eppadm/bin:/usr/bin/X11:.:/usr/sap/EPP/SYS/exe/uc/rs6000_64:/usr/sap/EPP/SYS/exe/run:/home/eppadm: NLS_LANG=AMERICAN_AMERICA.UTF8 EXTENDED_HISTORY=ON RSEC_SSFS_KEYPATH=/usr/sap/EPP/SYS/global/security/rsecssfs/key ORACLE_BASE=/oracle LC__FASTMSG=true LOGNAME=eppadm MAIL=/var/spool/mail/eppadm dbs_ora_tnsname=EPP ORACLE_SID=EPP LOCPATH=/usr/lib/nls/loc DB_SID=EPP DIR_LIBRARY=/usr/sap/EPP/SYS/exe/run USER=eppadm AUTHSTATE=files TNS_ADMIN=/usr/sap/EPP/SYS/profile/oracle IBM_JAVA_OPTIONS=-Xmx256M -Xj9 SAPEXE=/usr/sap/EPP/SYS/exe/run SHELL=/bin/csh ODMDIR=/etc/objrepos CLUSTER_OVERRIDE=yes HISTSIZE=10000 SAPDATA_HOME=/oracle/EPP RSEC_SSFS_DATAPATH=/usr/sap/EPP/SYS/global/security/rsecssfs/data HOME=/home/eppadm TERM=vt100 MAILMSG=[YOU HAVE NEW MAIL] ORACLE_HOME=/oracle/EPP/112_64 rsdb_ssfs_connect=0 PWD=/home/eppadm TZ=KORST-9 dbms_type=ORA SAPSYSTEMNAME=EPP NODNSSAPTRANSHOST=1 LIBPATH=/usr/sap/hostctrl/exe
+epprd_rg:cl_deactivate_fs(1.421)[fs_umount:486] [[ true != true ]]
+epprd_rg:cl_deactivate_fs(1.421)[fs_umount:482] : Process 26870066 has open files on /usr/sap. Record information about this
+epprd_rg:cl_deactivate_fs(1.421)[fs_umount:483] : process in case anyone is later surprised by this action.
+epprd_rg:cl_deactivate_fs(1.421)[fs_umount:485] ps ewwww 26870066
 PID TTY STAT TIME COMMAND
26870066 - A 0:00 /usr/sap/EPP/SCS01/exe/sapstartsrv pf=/usr/sap/EPP/SYS/profile/EPP_SCS01_epprd -D _=/usr/sap/EPP/SCS01/exe/sapstartsrv LANG=en_US THREAD=NOPS LOGIN=eppadm CLCMD_PASSTHRU=1 PATH=/oracle/EPP/112_64/bin:/usr/bin:/etc:/usr/sbin:/usr/ucb:/home/eppadm/bin:/usr/bin/X11:/sbin:.:/usr/sap/EPP/SYS/exe/uc/rs6000_64:/usr/sap/EPP/SYS/exe/run:/home/eppadm NLS_LANG=AMERICAN_AMERICA.UTF8 EXTENDED_HISTORY=ON RSEC_SSFS_KEYPATH=/usr/sap/EPP/SYS/global/security/rsecssfs/key ORACLE_BASE=/oracle LC__FASTMSG=true LOGNAME=eppadm MAIL=/var/spool/mail/eppadm dbs_ora_tnsname=EPP ORACLE_SID=EPP LOCPATH=/usr/lib/nls/loc DB_SID=EPP DIR_LIBRARY=/usr/sap/EPP/SCS01/exe USER=eppadm AUTHSTATE=files TNS_ADMIN=/usr/sap/EPP/SYS/profile/oracle IBM_JAVA_OPTIONS=-Xmx256M -Xj9 SAPEXE=/usr/sap/EPP/SYS/exe/run SHELL=/bin/csh ODMDIR=/etc/objrepos CLUSTER_OVERRIDE=yes HISTSIZE=10000 SAPDATA_HOME=/oracle/EPP RSEC_SSFS_DATAPATH=/usr/sap/EPP/SYS/global/security/rsecssfs/data HOME=/home/eppadm TERM=vt100 MAILMSG=[YOU HAVE NEW MAIL] ORACLE_HOME=/oracle/EPP/112_64 rsdb_ssfs_connect=0 PWD=/usr/sap/EPP/SYS/profile TZ=KORST-9 dbms_type=ORA SAPSYSTEMNAME=EPP NLSPATH=/usr/lib/nls/msg/%L/%N:/usr/lib/nls/msg/%L/%N.cat:/usr/lib/nls/msg/%l.%c/%N:/usr/lib/nls/msg/%l.%c/%N.cat LIBPATH=/usr/sap/EPP/SCS01/exe:/usr/sap/EPP/SCS01/exe:/usr/sap/EPP/SCS01/exe:/usr/sap/EPP/SYS/exe/run:/usr/sap/EPP/SYS/exe/uc/rs6000_64:/oracle/client/11x_64/instantclient
+epprd_rg:cl_deactivate_fs(1.428)[fs_umount:486] [[ true != true ]]
+epprd_rg:cl_deactivate_fs(1.428)[fs_umount:517] fuser -O -k -u -x /dev/saplv
/dev/saplv: 18088446c(eppadm) 18612598c(sapadm) 19464692c(daaadm) 20709668c(root) 24117562c(root) 26870066c(eppadm)
+epprd_rg:cl_deactivate_fs(1.449)[fs_umount:518] fuser -O -k -u -x -c /usr/sap
/usr/sap:
+epprd_rg:cl_deactivate_fs(1.503)[fs_umount:519] date '+%h %d %H:%M:%S.000'
Jan 28 19:42:34.000
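Only after the diagnostics are on record does the script escalate: fuser -k sends SIGKILL to everything holding the device open, then repeats the sweep against the mount point itself with -c to catch processes that pin the filesystem through their current directory rather than an open file. A condensed sketch of the escalation and retry (flags and timing as in the trace):

    if [[ -n $pidlist ]]
    then
        fuser -O -k -u -x /dev/saplv       # kill by device
        fuser -O -k -u -x -c /usr/sap      # kill by mount point (cwd references)
        sleep 1                            # give the kills time to take effect
    fi
    umount /usr/sap                        # retry; succeeds in this trace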
+epprd_rg:cl_deactivate_fs(1.506)[fs_umount:522] : Wait 1 second for the kills to be effective
+epprd_rg:cl_deactivate_fs(1.506)[fs_umount:524] [[ -n ' 18088446 18612598 19464692 20709668 24117562 26870066' ]]
+epprd_rg:cl_deactivate_fs(1.506)[fs_umount:526] sleep 1
+epprd_rg:cl_deactivate_fs(2.506)[fs_umount:528] umount /usr/sap
+epprd_rg:cl_deactivate_fs(3.165)[fs_umount:531] : Unmount of /usr/sap worked. Can stop now.
+epprd_rg:cl_deactivate_fs(3.165)[fs_umount:533] break
+epprd_rg:cl_deactivate_fs(3.165)[fs_umount:672] [[ 0 != 0 ]]
+epprd_rg:cl_deactivate_fs(3.165)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/usr/sap'
+epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log
+epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1
+epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime
+epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:36.018026
+epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:36.018026|INFO: Deactivating Filesystem|/usr/sap'
+epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
+epprd_rg:cl_deactivate_fs(3.196)[fs_umount:685] : append status to the status file
+epprd_rg:cl_deactivate_fs(3.196)[fs_umount:687] print -- 0 /dev/saplv /usr/sap
+epprd_rg:cl_deactivate_fs(3.196)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp
+epprd_rg:cl_deactivate_fs(3.196)[fs_umount:691] return 0
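Each fs_umount call ends by appending one record to /tmp/epprd_rg_deactivate_fs.tmp: the return code, the logical volume (omitted for NFS mounts, as in the /usr/sap/trans record earlier), and the mount point. A sketch of both halves of that protocol (the reader loop is an assumption about the caller, which is not shown in this section):

    # Writer, inside fs_umount: status, device, mount point.
    print -- 0 /dev/saplv /usr/sap >> /tmp/epprd_rg_deactivate_fs.tmp

    # Assumed reader on the caller side: any non-zero status marks a failure.
    while read rc rest
    do
        (( rc != 0 )) && print -u2 "unmount failed: $rest"
    done < /tmp/epprd_rg_deactivate_fs.tmp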
+epprd_rg:cl_deactivate_fs:/sapmnt[deactivate_fs_process_resources:764] PS4_LOOP=/sapmnt
+epprd_rg:cl_deactivate_fs:/sapmnt[deactivate_fs_process_resources:765] [[ sequential == parallel ]]
+epprd_rg:cl_deactivate_fs:/sapmnt[deactivate_fs_process_resources:770] fs_umount /sapmnt cl_deactivate_fs epprd_rg_deactivate_fs.tmp
+epprd_rg:cl_deactivate_fs(3.196)[fs_umount:313] FS=/sapmnt
+epprd_rg:cl_deactivate_fs(3.196)[fs_umount:313] typeset FS
+epprd_rg:cl_deactivate_fs(3.196)[fs_umount:314] PROGNAME=cl_deactivate_fs
+epprd_rg:cl_deactivate_fs(3.196)[fs_umount:314] typeset PROGNAME
+epprd_rg:cl_deactivate_fs(3.196)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp
+epprd_rg:cl_deactivate_fs(3.196)[fs_umount:315] typeset TMP_FILENAME
+epprd_rg:cl_deactivate_fs(3.196)[fs_umount:316] clwparroot epprd_rg
+epprd_rg:clwparroot[42] [[ high == high ]]
+epprd_rg:clwparroot[42] version=1.1
+epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils
+epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs
+epprd_rg:clwparroot[26] [[ high == high ]]
+epprd_rg:clwparroot[26] set -x
+epprd_rg:clwparroot[27] [[ high == high ]]
+epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$'
+epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin
+epprd_rg:clwparroot[30] export PATH
+epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr
+epprd_rg:clwparroot[34] usageErr=10
+epprd_rg:clwparroot[35] invalArgErr=11
+epprd_rg:clwparroot[36] internalErr=12
+epprd_rg:clwparroot[46] rgName=epprd_rg
+epprd_rg:clwparroot[49] uname
+epprd_rg:clwparroot[49] OSNAME=AIX
+epprd_rg:clwparroot[51] [[ AIX == *AIX* ]]
+epprd_rg:clwparroot[52] lslpp -l bos.wpars
+epprd_rg:clwparroot[52] 1> /dev/null 2>& 1
+epprd_rg:clwparroot[54] loadWparName epprd_rg
+epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]]
+epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource
+epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]]
+epprd_rg:clwparroot[loadWparName:1490] return 0
+epprd_rg:clwparroot[54] wparName=''
+epprd_rg:clwparroot[55] (( 0 != 0 ))
+epprd_rg:clwparroot[55] [[ -z '' ]]
+epprd_rg:clwparroot[57] exit 0
+epprd_rg:cl_deactivate_fs(3.226)[fs_umount:316] WPAR_ROOT=''
+epprd_rg:cl_deactivate_fs(3.226)[fs_umount:316] typeset WPAR_ROOT
+epprd_rg:cl_deactivate_fs(3.226)[fs_umount:317] STATUS=0
+epprd_rg:cl_deactivate_fs(3.226)[fs_umount:317] typeset -li STATUS
+epprd_rg:cl_deactivate_fs(3.226)[fs_umount:318] typeset lv
+epprd_rg:cl_deactivate_fs(3.226)[fs_umount:319] typeset fs_type
+epprd_rg:cl_deactivate_fs(3.226)[fs_umount:320] typeset count
+epprd_rg:cl_deactivate_fs(3.226)[fs_umount:321] typeset line
+epprd_rg:cl_deactivate_fs(3.226)[fs_umount:322] RC=0
+epprd_rg:cl_deactivate_fs(3.226)[fs_umount:322] typeset -li RC
+epprd_rg:cl_deactivate_fs(3.226)[fs_umount:323] typeset pid
+epprd_rg:cl_deactivate_fs(3.226)[fs_umount:324] typeset pidlist
+epprd_rg:cl_deactivate_fs(3.226)[fs_umount:325] typeset lv_lsfs
+epprd_rg:cl_deactivate_fs(3.226)[fs_umount:326] disable_procfile_debug=false
+epprd_rg:cl_deactivate_fs(3.226)[fs_umount:326] typeset disable_procfile_debug
+epprd_rg:cl_deactivate_fs(3.226)[fs_umount:327] typeset crossmount_rg
+epprd_rg:cl_deactivate_fs(3.226)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem
+epprd_rg:cl_deactivate_fs(3.229)[fs_umount:332] mount
+epprd_rg:cl_deactivate_fs(3.232)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/sapmnt
+epprd_rg:cl_deactivate_fs(3.237)[fs_umount:332] fs_type=''
+epprd_rg:cl_deactivate_fs(3.237)[fs_umount:333] [[ '' == nfs* ]]
+epprd_rg:cl_deactivate_fs(3.237)[fs_umount:365] : Get the logical volume associated with the filesystem
+epprd_rg:cl_deactivate_fs(3.237)[fs_umount:367] lsfs -c /sapmnt
+epprd_rg:cl_deactivate_fs(3.241)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/sapmnt:/dev/sapmntlv:jfs2:::20971520:rw:no:no'
+epprd_rg:cl_deactivate_fs(3.241)[fs_umount:382] : Get the logical volume name and filesystem type
+epprd_rg:cl_deactivate_fs(3.242)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/sapmnt:/dev/sapmntlv:jfs2:::20971520:rw:no:no'
+epprd_rg:cl_deactivate_fs(3.245)[fs_umount:384] tail -1
+epprd_rg:cl_deactivate_fs(3.247)[fs_umount:384] read skip lv fs_type rest
+epprd_rg:cl_deactivate_fs(3.247)[fs_umount:384] IFS=:
+epprd_rg:cl_deactivate_fs(3.247)[fs_umount:387] : For WPARs, find the real file system name
+epprd_rg:cl_deactivate_fs(3.247)[fs_umount:389] [[ -n '' ]]
+epprd_rg:cl_deactivate_fs(3.247)[fs_umount:392] : Check to see if filesystem is mounted.
+epprd_rg:cl_deactivate_fs(3.249)[fs_umount:394] mount
+epprd_rg:cl_deactivate_fs(3.249)[fs_umount:394] LC_ALL=C
+epprd_rg:cl_deactivate_fs(3.251)[fs_umount:394] awk '{ if ( $1 == "/dev/sapmntlv" ) print $2 }'
+epprd_rg:cl_deactivate_fs(3.255)[fs_umount:394] FS_MOUNTED=/sapmnt
+epprd_rg:cl_deactivate_fs(3.255)[fs_umount:395] [[ -n /sapmnt ]]
+epprd_rg:cl_deactivate_fs(3.255)[fs_umount:397] [[ /sapmnt != /sapmnt ]]
+epprd_rg:cl_deactivate_fs(3.255)[fs_umount:409] [[ /sapmnt == / ]]
+epprd_rg:cl_deactivate_fs(3.255)[fs_umount:409] [[ /sapmnt == /usr ]]
+epprd_rg:cl_deactivate_fs(3.255)[fs_umount:409] [[ /sapmnt == /dev ]]
+epprd_rg:cl_deactivate_fs(3.256)[fs_umount:409] [[ /sapmnt == /proc ]]
+epprd_rg:cl_deactivate_fs(3.256)[fs_umount:409] [[ /sapmnt == /var ]]
+epprd_rg:cl_deactivate_fs(3.256)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/sapmnt'
+epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log
+epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1
+epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime
+epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:36.107452
+epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:36.107452|INFO: Deactivating Filesystem|/sapmnt'
+epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
+epprd_rg:cl_deactivate_fs(3.285)[fs_umount:427] : Try up to 60 times to unmount /sapmnt
+epprd_rg:cl_deactivate_fs(3.285)[fs_umount:429] (( count=1))
+epprd_rg:cl_deactivate_fs(3.285)[fs_umount:429] (( count <= 60))
+epprd_rg:cl_deactivate_fs(3.285)[fs_umount:432] date '+%h %d %H:%M:%S.000'
+epprd_rg:cl_deactivate_fs(3.288)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 19:42:36.000
+epprd_rg:cl_deactivate_fs(3.288)[fs_umount:434] umount /sapmnt
+epprd_rg:cl_deactivate_fs(3.504)[fs_umount:437] : Unmount of /sapmnt worked. Can stop now.
+epprd_rg:cl_deactivate_fs(3.504)[fs_umount:439] break
+epprd_rg:cl_deactivate_fs(3.504)[fs_umount:672] [[ 0 != 0 ]]
+epprd_rg:cl_deactivate_fs(3.504)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/sapmnt'
+epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log
+epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1
+epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime
+epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:36.356919
+epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:36.356919|INFO: Deactivating Filesystem|/sapmnt'
+epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
+epprd_rg:cl_deactivate_fs(3.535)[fs_umount:685] : append status to the status file
+epprd_rg:cl_deactivate_fs(3.535)[fs_umount:687] print -- 0 /dev/sapmntlv /sapmnt
+epprd_rg:cl_deactivate_fs(3.535)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp
+epprd_rg:cl_deactivate_fs(3.535)[fs_umount:691] return 0
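Every fs_umount call begins by asking clwparroot whether the resource group lives in a WPAR; the empty wparName returned here means all unmounts run in the global environment, so WPAR_ROOT stays empty and mount points are used as-is. The heart of that lookup, reduced to its essentials (the clodmget invocation is copied from the trace; the surrounding function is a simplification of loadWparName):

    # Print the WPAR name configured for the resource group, if any.
    loadWparName()
    {
        typeset wparName
        wparName=$(clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource)
        [[ -z $wparName ]] && return 0    # no WPAR: global environment
        print -- "$wparName"
    }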
+epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata4[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/sapdata4
+epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata4[deactivate_fs_process_resources:765] [[ sequential == parallel ]]
+epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata4[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/sapdata4 cl_deactivate_fs epprd_rg_deactivate_fs.tmp
+epprd_rg:cl_deactivate_fs(3.535)[fs_umount:313] FS=/oracle/EPP/sapdata4
+epprd_rg:cl_deactivate_fs(3.535)[fs_umount:313] typeset FS
+epprd_rg:cl_deactivate_fs(3.535)[fs_umount:314] PROGNAME=cl_deactivate_fs
+epprd_rg:cl_deactivate_fs(3.535)[fs_umount:314] typeset PROGNAME
+epprd_rg:cl_deactivate_fs(3.535)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp
+epprd_rg:cl_deactivate_fs(3.535)[fs_umount:315] typeset TMP_FILENAME
+epprd_rg:cl_deactivate_fs(3.535)[fs_umount:316] clwparroot epprd_rg
+epprd_rg:clwparroot[42] [[ high == high ]]
+epprd_rg:clwparroot[42] version=1.1
+epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils
+epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs
+epprd_rg:clwparroot[26] [[ high == high ]]
+epprd_rg:clwparroot[26] set -x
+epprd_rg:clwparroot[27] [[ high == high ]]
+epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$'
+epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin
+epprd_rg:clwparroot[30] export PATH
+epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr
+epprd_rg:clwparroot[34] usageErr=10
+epprd_rg:clwparroot[35] invalArgErr=11
+epprd_rg:clwparroot[36] internalErr=12
+epprd_rg:clwparroot[46] rgName=epprd_rg
+epprd_rg:clwparroot[49] uname
+epprd_rg:clwparroot[49] OSNAME=AIX
+epprd_rg:clwparroot[51] [[ AIX == *AIX* ]]
+epprd_rg:clwparroot[52] lslpp -l bos.wpars
+epprd_rg:clwparroot[52] 1> /dev/null 2>& 1
+epprd_rg:clwparroot[54] loadWparName epprd_rg
+epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]]
+epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource
+epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]]
+epprd_rg:clwparroot[loadWparName:1490] return 0
+epprd_rg:clwparroot[54] wparName=''
+epprd_rg:clwparroot[55] (( 0 != 0 ))
+epprd_rg:clwparroot[55] [[ -z '' ]]
+epprd_rg:clwparroot[57] exit 0
+epprd_rg:cl_deactivate_fs(3.555)[fs_umount:316] WPAR_ROOT=''
+epprd_rg:cl_deactivate_fs(3.555)[fs_umount:316] typeset WPAR_ROOT
+epprd_rg:cl_deactivate_fs(3.555)[fs_umount:317] STATUS=0
+epprd_rg:cl_deactivate_fs(3.555)[fs_umount:317] typeset -li STATUS
+epprd_rg:cl_deactivate_fs(3.555)[fs_umount:318] typeset lv
+epprd_rg:cl_deactivate_fs(3.555)[fs_umount:319] typeset fs_type
+epprd_rg:cl_deactivate_fs(3.555)[fs_umount:320] typeset count
+epprd_rg:cl_deactivate_fs(3.555)[fs_umount:321] typeset line
+epprd_rg:cl_deactivate_fs(3.555)[fs_umount:322] RC=0
+epprd_rg:cl_deactivate_fs(3.555)[fs_umount:322] typeset -li RC
+epprd_rg:cl_deactivate_fs(3.555)[fs_umount:323] typeset pid
+epprd_rg:cl_deactivate_fs(3.555)[fs_umount:324] typeset pidlist
+epprd_rg:cl_deactivate_fs(3.555)[fs_umount:325] typeset lv_lsfs
+epprd_rg:cl_deactivate_fs(3.555)[fs_umount:326] disable_procfile_debug=false
+epprd_rg:cl_deactivate_fs(3.555)[fs_umount:326] typeset disable_procfile_debug
+epprd_rg:cl_deactivate_fs(3.555)[fs_umount:327] typeset crossmount_rg
+epprd_rg:cl_deactivate_fs(3.555)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem
+epprd_rg:cl_deactivate_fs(3.556)[fs_umount:332] mount
+epprd_rg:cl_deactivate_fs(3.558)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/sapdata4
+epprd_rg:cl_deactivate_fs(3.563)[fs_umount:332] fs_type=''
+epprd_rg:cl_deactivate_fs(3.563)[fs_umount:333] [[ '' == nfs* ]]
+epprd_rg:cl_deactivate_fs(3.563)[fs_umount:365] : Get the logical volume associated with the filesystem
+epprd_rg:cl_deactivate_fs(3.563)[fs_umount:367] lsfs -c /oracle/EPP/sapdata4
+epprd_rg:cl_deactivate_fs(3.566)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata4:/dev/sapdata4lv:jfs2:::104857600:rw:no:no'
+epprd_rg:cl_deactivate_fs(3.566)[fs_umount:382] : Get the logical volume name and filesystem type
+epprd_rg:cl_deactivate_fs(3.567)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata4:/dev/sapdata4lv:jfs2:::104857600:rw:no:no'
+epprd_rg:cl_deactivate_fs(3.568)[fs_umount:384] tail -1
+epprd_rg:cl_deactivate_fs(3.570)[fs_umount:384] read skip lv fs_type rest
+epprd_rg:cl_deactivate_fs(3.571)[fs_umount:384] IFS=:
+epprd_rg:cl_deactivate_fs(3.571)[fs_umount:387] : For WPARs, find the real file system name
+epprd_rg:cl_deactivate_fs(3.571)[fs_umount:389] [[ -n '' ]]
+epprd_rg:cl_deactivate_fs(3.571)[fs_umount:392] : Check to see if filesystem is mounted.
+epprd_rg:cl_deactivate_fs(3.572)[fs_umount:394] mount
+epprd_rg:cl_deactivate_fs(3.572)[fs_umount:394] LC_ALL=C
+epprd_rg:cl_deactivate_fs(3.575)[fs_umount:394] awk '{ if ( $1 == "/dev/sapdata4lv" ) print $2 }'
+epprd_rg:cl_deactivate_fs(3.579)[fs_umount:394] FS_MOUNTED=/oracle/EPP/sapdata4
+epprd_rg:cl_deactivate_fs(3.579)[fs_umount:395] [[ -n /oracle/EPP/sapdata4 ]]
+epprd_rg:cl_deactivate_fs(3.579)[fs_umount:397] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]]
+epprd_rg:cl_deactivate_fs(3.579)[fs_umount:409] [[ /oracle/EPP/sapdata4 == / ]]
+epprd_rg:cl_deactivate_fs(3.579)[fs_umount:409] [[ /oracle/EPP/sapdata4 == /usr ]]
+epprd_rg:cl_deactivate_fs(3.579)[fs_umount:409] [[ /oracle/EPP/sapdata4 == /dev ]]
+epprd_rg:cl_deactivate_fs(3.579)[fs_umount:409] [[ /oracle/EPP/sapdata4 == /proc ]]
+epprd_rg:cl_deactivate_fs(3.579)[fs_umount:409] [[ /oracle/EPP/sapdata4 == /var ]]
+epprd_rg:cl_deactivate_fs(3.579)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata4'
+epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log
+epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1
+epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime
+epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:36.431789
+epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:36.431789|INFO: Deactivating Filesystem|/oracle/EPP/sapdata4'
+epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
+epprd_rg:cl_deactivate_fs(3.609)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/sapdata4
+epprd_rg:cl_deactivate_fs(3.609)[fs_umount:429] (( count=1))
+epprd_rg:cl_deactivate_fs(3.610)[fs_umount:429] (( count <= 60))
+epprd_rg:cl_deactivate_fs(3.610)[fs_umount:432] date '+%h %d %H:%M:%S.000'
+epprd_rg:cl_deactivate_fs(3.612)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 19:42:36.000
+epprd_rg:cl_deactivate_fs(3.612)[fs_umount:434] umount /oracle/EPP/sapdata4
+epprd_rg:cl_deactivate_fs(3.683)[fs_umount:437] : Unmount of /oracle/EPP/sapdata4 worked. Can stop now.
+epprd_rg:cl_deactivate_fs(3.683)[fs_umount:439] break
+epprd_rg:cl_deactivate_fs(3.683)[fs_umount:672] [[ 0 != 0 ]]
+epprd_rg:cl_deactivate_fs(3.683)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata4'
+epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log
+epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1
+epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime
+epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:36.535817
+epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:36.535817|INFO: Deactivating Filesystem|/oracle/EPP/sapdata4'
+epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
+epprd_rg:cl_deactivate_fs(3.714)[fs_umount:685] : append status to the status file
+epprd_rg:cl_deactivate_fs(3.714)[fs_umount:687] print -- 0 /dev/sapdata4lv /oracle/EPP/sapdata4
+epprd_rg:cl_deactivate_fs(3.714)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp
+epprd_rg:cl_deactivate_fs(3.714)[fs_umount:691] return 0
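Each mount point is bracketed by a pair of amlog_trace records in clavailability.log, one before the first unmount attempt and one after success; subtracting the two timestamps (19:42:36.431789 and 19:42:36.535817 for /oracle/EPP/sapdata4) gives the per-filesystem deactivation latency. A sketch of an equivalent helper (clcycle and cltime are the PowerHA utilities shown in the trace; this function body is a simplification of amlog_trace, not its actual source):

    amlog_trace()
    {
        typeset msg=$2
        clcycle clavailability.log 1> /dev/null 2>& 1   # rotate the log if needed
        typeset DATE=$(cltime)                          # ISO 8601 timestamp
        echo "|$DATE|INFO: $msg" >> /var/hacmp/availability/clavailability.log
    }

    amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata4'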
+epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata3[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/sapdata3
+epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata3[deactivate_fs_process_resources:765] [[ sequential == parallel ]]
+epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata3[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/sapdata3 cl_deactivate_fs epprd_rg_deactivate_fs.tmp
+epprd_rg:cl_deactivate_fs(3.714)[fs_umount:313] FS=/oracle/EPP/sapdata3
+epprd_rg:cl_deactivate_fs(3.714)[fs_umount:313] typeset FS
+epprd_rg:cl_deactivate_fs(3.714)[fs_umount:314] PROGNAME=cl_deactivate_fs
+epprd_rg:cl_deactivate_fs(3.714)[fs_umount:314] typeset PROGNAME
+epprd_rg:cl_deactivate_fs(3.714)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp
+epprd_rg:cl_deactivate_fs(3.714)[fs_umount:315] typeset TMP_FILENAME
+epprd_rg:cl_deactivate_fs(3.714)[fs_umount:316] clwparroot epprd_rg
+epprd_rg:clwparroot[42] [[ high == high ]]
+epprd_rg:clwparroot[42] version=1.1
+epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils
+epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs
+epprd_rg:clwparroot[26] [[ high == high ]]
+epprd_rg:clwparroot[26] set -x
+epprd_rg:clwparroot[27] [[ high == high ]]
+epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$'
+epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin
+epprd_rg:clwparroot[30] export PATH
+epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr
+epprd_rg:clwparroot[34] usageErr=10
+epprd_rg:clwparroot[35] invalArgErr=11
+epprd_rg:clwparroot[36] internalErr=12
+epprd_rg:clwparroot[46] rgName=epprd_rg
+epprd_rg:clwparroot[49] uname
+epprd_rg:clwparroot[49] OSNAME=AIX
+epprd_rg:clwparroot[51] [[ AIX == *AIX* ]]
+epprd_rg:clwparroot[52] lslpp -l bos.wpars
+epprd_rg:clwparroot[52] 1> /dev/null 2>& 1
+epprd_rg:clwparroot[54] loadWparName epprd_rg
+epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]]
+epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource
+epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]]
+epprd_rg:clwparroot[loadWparName:1490] return 0
+epprd_rg:clwparroot[54] wparName=''
+epprd_rg:clwparroot[55] (( 0 != 0 ))
+epprd_rg:clwparroot[55] [[ -z '' ]]
+epprd_rg:clwparroot[57] exit 0
+epprd_rg:cl_deactivate_fs(3.734)[fs_umount:316] WPAR_ROOT=''
+epprd_rg:cl_deactivate_fs(3.734)[fs_umount:316] typeset WPAR_ROOT
+epprd_rg:cl_deactivate_fs(3.734)[fs_umount:317] STATUS=0
+epprd_rg:cl_deactivate_fs(3.734)[fs_umount:317] typeset -li STATUS
+epprd_rg:cl_deactivate_fs(3.734)[fs_umount:318] typeset lv
+epprd_rg:cl_deactivate_fs(3.734)[fs_umount:319] typeset fs_type
+epprd_rg:cl_deactivate_fs(3.734)[fs_umount:320] typeset count
+epprd_rg:cl_deactivate_fs(3.734)[fs_umount:321] typeset line
+epprd_rg:cl_deactivate_fs(3.734)[fs_umount:322] RC=0
+epprd_rg:cl_deactivate_fs(3.734)[fs_umount:322] typeset -li RC
+epprd_rg:cl_deactivate_fs(3.734)[fs_umount:323] typeset pid
+epprd_rg:cl_deactivate_fs(3.734)[fs_umount:324] typeset pidlist
+epprd_rg:cl_deactivate_fs(3.734)[fs_umount:325] typeset lv_lsfs
+epprd_rg:cl_deactivate_fs(3.734)[fs_umount:326] disable_procfile_debug=false
+epprd_rg:cl_deactivate_fs(3.734)[fs_umount:326] typeset disable_procfile_debug
+epprd_rg:cl_deactivate_fs(3.734)[fs_umount:327] typeset crossmount_rg
+epprd_rg:cl_deactivate_fs(3.734)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem
+epprd_rg:cl_deactivate_fs(3.736)[fs_umount:332] mount
+epprd_rg:cl_deactivate_fs(3.738)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/sapdata3
+epprd_rg:cl_deactivate_fs(3.742)[fs_umount:332] fs_type=''
+epprd_rg:cl_deactivate_fs(3.743)[fs_umount:333] [[ '' == nfs* ]]
+epprd_rg:cl_deactivate_fs(3.743)[fs_umount:365] : Get the logical volume associated with the filesystem
+epprd_rg:cl_deactivate_fs(3.743)[fs_umount:367] lsfs -c /oracle/EPP/sapdata3
+epprd_rg:cl_deactivate_fs(3.746)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata3:/dev/sapdata3lv:jfs2:::104857600:rw:no:no'
+epprd_rg:cl_deactivate_fs(3.746)[fs_umount:382] : Get the logical volume name and filesystem type
+epprd_rg:cl_deactivate_fs(3.747)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata3:/dev/sapdata3lv:jfs2:::104857600:rw:no:no'
+epprd_rg:cl_deactivate_fs(3.749)[fs_umount:384] tail -1
+epprd_rg:cl_deactivate_fs(3.751)[fs_umount:384] read skip lv fs_type rest
+epprd_rg:cl_deactivate_fs(3.751)[fs_umount:384] IFS=:
+epprd_rg:cl_deactivate_fs(3.751)[fs_umount:387] : For WPARs, find the real file system name
+epprd_rg:cl_deactivate_fs(3.751)[fs_umount:389] [[ -n '' ]]
+epprd_rg:cl_deactivate_fs(3.751)[fs_umount:392] : Check to see if filesystem is mounted.
+epprd_rg:cl_deactivate_fs(3.752)[fs_umount:394] mount
+epprd_rg:cl_deactivate_fs(3.753)[fs_umount:394] LC_ALL=C
+epprd_rg:cl_deactivate_fs(3.755)[fs_umount:394] awk '{ if ( $1 == "/dev/sapdata3lv" ) print $2 }'
+epprd_rg:cl_deactivate_fs(3.759)[fs_umount:394] FS_MOUNTED=/oracle/EPP/sapdata3
+epprd_rg:cl_deactivate_fs(3.759)[fs_umount:395] [[ -n /oracle/EPP/sapdata3 ]]
+epprd_rg:cl_deactivate_fs(3.759)[fs_umount:397] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]]
+epprd_rg:cl_deactivate_fs(3.759)[fs_umount:409] [[ /oracle/EPP/sapdata3 == / ]]
+epprd_rg:cl_deactivate_fs(3.759)[fs_umount:409] [[ /oracle/EPP/sapdata3 == /usr ]]
+epprd_rg:cl_deactivate_fs(3.759)[fs_umount:409] [[ /oracle/EPP/sapdata3 == /dev ]]
+epprd_rg:cl_deactivate_fs(3.759)[fs_umount:409] [[ /oracle/EPP/sapdata3 == /proc ]]
+epprd_rg:cl_deactivate_fs(3.759)[fs_umount:409] [[ /oracle/EPP/sapdata3 == /var ]]
+epprd_rg:cl_deactivate_fs(3.759)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata3'
+epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log
+epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1
+epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime
+epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:36.611558
+epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:36.611558|INFO: Deactivating Filesystem|/oracle/EPP/sapdata3'
+epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
+epprd_rg:cl_deactivate_fs(3.789)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/sapdata3
+epprd_rg:cl_deactivate_fs(3.789)[fs_umount:429] (( count=1))
+epprd_rg:cl_deactivate_fs(3.789)[fs_umount:429] (( count <= 60))
+epprd_rg:cl_deactivate_fs(3.789)[fs_umount:432] date '+%h %d %H:%M:%S.000'
+epprd_rg:cl_deactivate_fs(3.792)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 19:42:36.000
+epprd_rg:cl_deactivate_fs(3.792)[fs_umount:434] umount /oracle/EPP/sapdata3
+epprd_rg:cl_deactivate_fs(3.861)[fs_umount:437] : Unmount of /oracle/EPP/sapdata3 worked. Can stop now.
+epprd_rg:cl_deactivate_fs(3.861)[fs_umount:439] break
+epprd_rg:cl_deactivate_fs(3.862)[fs_umount:672] [[ 0 != 0 ]]
+epprd_rg:cl_deactivate_fs(3.862)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata3'
+epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log
+epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1
+epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime
+epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:36.714809
+epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:36.714809|INFO: Deactivating Filesystem|/oracle/EPP/sapdata3'
+epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
+epprd_rg:cl_deactivate_fs(3.893)[fs_umount:685] : append status to the status file
+epprd_rg:cl_deactivate_fs(3.893)[fs_umount:687] print -- 0 /dev/sapdata3lv /oracle/EPP/sapdata3
+epprd_rg:cl_deactivate_fs(3.893)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp
+epprd_rg:cl_deactivate_fs(3.893)[fs_umount:691] return 0
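Before any unmount is attempted, fs_umount refuses to touch system-critical mount points; the run of [[ ... == ... ]] tests repeated above for each filesystem compares the resolved mount point against /, /usr, /dev, /proc and /var. The same guard written as a single case statement (an equivalent rewrite for readability, not the script's own code):

    case $FS_MOUNTED in
        / | /usr | /dev | /proc | /var )
            print -u2 "refusing to unmount system filesystem $FS_MOUNTED"
            return 1
            ;;
    esac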
+epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata2[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/sapdata2
+epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata2[deactivate_fs_process_resources:765] [[ sequential == parallel ]]
+epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata2[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/sapdata2 cl_deactivate_fs epprd_rg_deactivate_fs.tmp
+epprd_rg:cl_deactivate_fs(3.893)[fs_umount:313] FS=/oracle/EPP/sapdata2
+epprd_rg:cl_deactivate_fs(3.893)[fs_umount:313] typeset FS
+epprd_rg:cl_deactivate_fs(3.893)[fs_umount:314] PROGNAME=cl_deactivate_fs
+epprd_rg:cl_deactivate_fs(3.893)[fs_umount:314] typeset PROGNAME
+epprd_rg:cl_deactivate_fs(3.893)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp
+epprd_rg:cl_deactivate_fs(3.893)[fs_umount:315] typeset TMP_FILENAME
+epprd_rg:cl_deactivate_fs(3.893)[fs_umount:316] clwparroot epprd_rg
+epprd_rg:clwparroot[42] [[ high == high ]]
+epprd_rg:clwparroot[42] version=1.1
+epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils
+epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs
+epprd_rg:clwparroot[26] [[ high == high ]]
+epprd_rg:clwparroot[26] set -x
+epprd_rg:clwparroot[27] [[ high == high ]]
+epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$'
+epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin
+epprd_rg:clwparroot[30] export PATH
+epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr
+epprd_rg:clwparroot[34] usageErr=10
+epprd_rg:clwparroot[35] invalArgErr=11
+epprd_rg:clwparroot[36] internalErr=12
+epprd_rg:clwparroot[46] rgName=epprd_rg
+epprd_rg:clwparroot[49] uname
+epprd_rg:clwparroot[49] OSNAME=AIX
+epprd_rg:clwparroot[51] [[ AIX == *AIX* ]]
+epprd_rg:clwparroot[52] lslpp -l bos.wpars
+epprd_rg:clwparroot[52] 1> /dev/null 2>& 1
+epprd_rg:clwparroot[54] loadWparName epprd_rg
+epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]]
+epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource
+epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]]
+epprd_rg:clwparroot[loadWparName:1490] return 0
+epprd_rg:clwparroot[54] wparName=''
+epprd_rg:clwparroot[55] (( 0 != 0 ))
+epprd_rg:clwparroot[55] [[ -z '' ]]
+epprd_rg:clwparroot[57] exit 0
+epprd_rg:cl_deactivate_fs(3.913)[fs_umount:316] WPAR_ROOT=''
+epprd_rg:cl_deactivate_fs(3.913)[fs_umount:316] typeset WPAR_ROOT
+epprd_rg:cl_deactivate_fs(3.913)[fs_umount:317] STATUS=0
+epprd_rg:cl_deactivate_fs(3.913)[fs_umount:317] typeset -li STATUS
+epprd_rg:cl_deactivate_fs(3.913)[fs_umount:318] typeset lv
+epprd_rg:cl_deactivate_fs(3.913)[fs_umount:319] typeset fs_type
+epprd_rg:cl_deactivate_fs(3.913)[fs_umount:320] typeset count
+epprd_rg:cl_deactivate_fs(3.913)[fs_umount:321] typeset line
+epprd_rg:cl_deactivate_fs(3.913)[fs_umount:322] RC=0
+epprd_rg:cl_deactivate_fs(3.913)[fs_umount:322] typeset -li RC
+epprd_rg:cl_deactivate_fs(3.913)[fs_umount:323] typeset pid
+epprd_rg:cl_deactivate_fs(3.913)[fs_umount:324] typeset pidlist
+epprd_rg:cl_deactivate_fs(3.913)[fs_umount:325] typeset lv_lsfs
+epprd_rg:cl_deactivate_fs(3.913)[fs_umount:326] disable_procfile_debug=false
+epprd_rg:cl_deactivate_fs(3.913)[fs_umount:326] typeset disable_procfile_debug
+epprd_rg:cl_deactivate_fs(3.913)[fs_umount:327] typeset crossmount_rg
+epprd_rg:cl_deactivate_fs(3.913)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem
+epprd_rg:cl_deactivate_fs(3.915)[fs_umount:332] mount
+epprd_rg:cl_deactivate_fs(3.917)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/sapdata2
+epprd_rg:cl_deactivate_fs(3.921)[fs_umount:332] fs_type=''
+epprd_rg:cl_deactivate_fs(3.921)[fs_umount:333] [[ '' == nfs* ]]
+epprd_rg:cl_deactivate_fs(3.921)[fs_umount:365] : Get the logical volume associated with the filesystem
+epprd_rg:cl_deactivate_fs(3.921)[fs_umount:367] lsfs -c /oracle/EPP/sapdata2
+epprd_rg:cl_deactivate_fs(3.924)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata2:/dev/sapdata2lv:jfs2:::104857600:rw:no:no'
+epprd_rg:cl_deactivate_fs(3.924)[fs_umount:382] : Get the logical volume name and filesystem type
+epprd_rg:cl_deactivate_fs(3.925)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata2:/dev/sapdata2lv:jfs2:::104857600:rw:no:no'
+epprd_rg:cl_deactivate_fs(3.928)[fs_umount:384] tail -1
+epprd_rg:cl_deactivate_fs(3.928)[fs_umount:384] read skip lv fs_type rest
+epprd_rg:cl_deactivate_fs(3.928)[fs_umount:384] IFS=:
+epprd_rg:cl_deactivate_fs(3.930)[fs_umount:387] : For WPARs, find the real file system name
+epprd_rg:cl_deactivate_fs(3.930)[fs_umount:389] [[ -n '' ]]
+epprd_rg:cl_deactivate_fs(3.930)[fs_umount:392] : Check to see if filesystem is mounted.
+epprd_rg:cl_deactivate_fs(3.931)[fs_umount:394] mount
+epprd_rg:cl_deactivate_fs(3.931)[fs_umount:394] LC_ALL=C
+epprd_rg:cl_deactivate_fs(3.933)[fs_umount:394] awk '{ if ( $1 == "/dev/sapdata2lv" ) print $2 }'
+epprd_rg:cl_deactivate_fs(3.937)[fs_umount:394] FS_MOUNTED=/oracle/EPP/sapdata2
+epprd_rg:cl_deactivate_fs(3.937)[fs_umount:395] [[ -n /oracle/EPP/sapdata2 ]]
+epprd_rg:cl_deactivate_fs(3.937)[fs_umount:397] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]]
+epprd_rg:cl_deactivate_fs(3.937)[fs_umount:409] [[ /oracle/EPP/sapdata2 == / ]]
+epprd_rg:cl_deactivate_fs(3.937)[fs_umount:409] [[ /oracle/EPP/sapdata2 == /usr ]]
+epprd_rg:cl_deactivate_fs(3.937)[fs_umount:409] [[ /oracle/EPP/sapdata2 == /dev ]]
+epprd_rg:cl_deactivate_fs(3.937)[fs_umount:409] [[ /oracle/EPP/sapdata2 == /proc ]]
+epprd_rg:cl_deactivate_fs(3.937)[fs_umount:409] [[ /oracle/EPP/sapdata2 == /var ]]
+epprd_rg:cl_deactivate_fs(3.938)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata2'
+epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log
+epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1
+epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime
+epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:36.790229
+epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:36.790229|INFO: Deactivating Filesystem|/oracle/EPP/sapdata2'
+epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
+epprd_rg:cl_deactivate_fs(3.968)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/sapdata2
+epprd_rg:cl_deactivate_fs(3.968)[fs_umount:429] (( count=1))
+epprd_rg:cl_deactivate_fs(3.968)[fs_umount:429] (( count <= 60))
+epprd_rg:cl_deactivate_fs(3.968)[fs_umount:432] date '+%h %d %H:%M:%S.000'
+epprd_rg:cl_deactivate_fs(3.971)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 19:42:36.000
+epprd_rg:cl_deactivate_fs(3.971)[fs_umount:434] umount /oracle/EPP/sapdata2
+epprd_rg:cl_deactivate_fs(4.040)[fs_umount:437] : Unmount of /oracle/EPP/sapdata2 worked. Can stop now.
+epprd_rg:cl_deactivate_fs(4.040)[fs_umount:439] break
+epprd_rg:cl_deactivate_fs(4.040)[fs_umount:672] [[ 0 != 0 ]]
+epprd_rg:cl_deactivate_fs(4.040)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata2'
+epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log
+epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1
+epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime
+epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:36.893467
+epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:36.893467|INFO: Deactivating Filesystem|/oracle/EPP/sapdata2'
+epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
+epprd_rg:cl_deactivate_fs(4.071)[fs_umount:685] : append status to the status file
+epprd_rg:cl_deactivate_fs(4.071)[fs_umount:687] print -- 0 /dev/sapdata2lv /oracle/EPP/sapdata2
+epprd_rg:cl_deactivate_fs(4.071)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp
+epprd_rg:cl_deactivate_fs(4.071)[fs_umount:691] return 0
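The unmount itself always runs inside a bounded retry loop: up to 60 attempts, each logged with an attempt counter and a date stamp in the '+%h %d %H:%M:%S.000' format, with the fuser escalation taken only on attempts that fail. A skeleton of that loop (the kill logic is elided; the limit and format string match the trace):

    typeset -li count=1
    while (( count <= 60 ))
    do
        : Attempt $count of 60 to unmount at $(date '+%h %d %H:%M:%S.000')
        umount /oracle/EPP/sapdata1 && break   # stop as soon as one attempt succeeds
        # ...kill processes holding the filesystem open, sleep, then retry...
        (( count = count + 1 ))
    done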
+epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata1[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/sapdata1
+epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata1[deactivate_fs_process_resources:765] [[ sequential == parallel ]]
+epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata1[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/sapdata1 cl_deactivate_fs epprd_rg_deactivate_fs.tmp
+epprd_rg:cl_deactivate_fs(4.071)[fs_umount:313] FS=/oracle/EPP/sapdata1
+epprd_rg:cl_deactivate_fs(4.071)[fs_umount:313] typeset FS
+epprd_rg:cl_deactivate_fs(4.071)[fs_umount:314] PROGNAME=cl_deactivate_fs
+epprd_rg:cl_deactivate_fs(4.071)[fs_umount:314] typeset PROGNAME
+epprd_rg:cl_deactivate_fs(4.071)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp
+epprd_rg:cl_deactivate_fs(4.071)[fs_umount:315] typeset TMP_FILENAME
+epprd_rg:cl_deactivate_fs(4.071)[fs_umount:316] clwparroot epprd_rg
+epprd_rg:clwparroot[42] [[ high == high ]]
+epprd_rg:clwparroot[42] version=1.1
+epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils
+epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs
+epprd_rg:clwparroot[26] [[ high == high ]]
+epprd_rg:clwparroot[26] set -x
+epprd_rg:clwparroot[27] [[ high == high ]]
+epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$'
+epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin
+epprd_rg:clwparroot[30] export PATH
+epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr
+epprd_rg:clwparroot[34] usageErr=10
+epprd_rg:clwparroot[35] invalArgErr=11
+epprd_rg:clwparroot[36] internalErr=12
+epprd_rg:clwparroot[46] rgName=epprd_rg
+epprd_rg:clwparroot[49] uname
+epprd_rg:clwparroot[49] OSNAME=AIX
+epprd_rg:clwparroot[51] [[ AIX == *AIX* ]]
+epprd_rg:clwparroot[52] lslpp -l bos.wpars
+epprd_rg:clwparroot[52] 1> /dev/null 2>& 1
+epprd_rg:clwparroot[54] loadWparName epprd_rg
+epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]]
+epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource
+epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]]
+epprd_rg:clwparroot[loadWparName:1490] return 0
+epprd_rg:clwparroot[54] wparName=''
+epprd_rg:clwparroot[55] (( 0 != 0 ))
+epprd_rg:clwparroot[55] [[ -z '' ]]
+epprd_rg:clwparroot[57] exit 0
+epprd_rg:cl_deactivate_fs(4.092)[fs_umount:316] WPAR_ROOT=''
+epprd_rg:cl_deactivate_fs(4.092)[fs_umount:316] typeset WPAR_ROOT
+epprd_rg:cl_deactivate_fs(4.092)[fs_umount:317] STATUS=0
+epprd_rg:cl_deactivate_fs(4.092)[fs_umount:317] typeset -li STATUS
+epprd_rg:cl_deactivate_fs(4.092)[fs_umount:318] typeset lv
+epprd_rg:cl_deactivate_fs(4.092)[fs_umount:319] typeset fs_type
+epprd_rg:cl_deactivate_fs(4.092)[fs_umount:320] typeset count
+epprd_rg:cl_deactivate_fs(4.092)[fs_umount:321] typeset line
+epprd_rg:cl_deactivate_fs(4.092)[fs_umount:322] RC=0
+epprd_rg:cl_deactivate_fs(4.092)[fs_umount:322] typeset -li RC
+epprd_rg:cl_deactivate_fs(4.092)[fs_umount:323] typeset pid
+epprd_rg:cl_deactivate_fs(4.092)[fs_umount:324] typeset pidlist
+epprd_rg:cl_deactivate_fs(4.092)[fs_umount:325] typeset lv_lsfs
+epprd_rg:cl_deactivate_fs(4.092)[fs_umount:326] disable_procfile_debug=false
+epprd_rg:cl_deactivate_fs(4.092)[fs_umount:326] typeset disable_procfile_debug
+epprd_rg:cl_deactivate_fs(4.092)[fs_umount:327] typeset crossmount_rg
+epprd_rg:cl_deactivate_fs(4.092)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem
+epprd_rg:cl_deactivate_fs(4.093)[fs_umount:332] mount
+epprd_rg:cl_deactivate_fs(4.095)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/sapdata1
+epprd_rg:cl_deactivate_fs(4.099)[fs_umount:332] fs_type=''
+epprd_rg:cl_deactivate_fs(4.100)[fs_umount:333] [[ '' == nfs* ]]
+epprd_rg:cl_deactivate_fs(4.100)[fs_umount:365] : Get the logical volume associated with the filesystem
+epprd_rg:cl_deactivate_fs(4.100)[fs_umount:367] lsfs -c /oracle/EPP/sapdata1
+epprd_rg:cl_deactivate_fs(4.103)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata1:/dev/sapdata1lv:jfs2:::104857600:rw:no:no'
+epprd_rg:cl_deactivate_fs(4.103)[fs_umount:382] : Get the logical volume name and filesystem type
+epprd_rg:cl_deactivate_fs(4.104)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata1:/dev/sapdata1lv:jfs2:::104857600:rw:no:no'
+epprd_rg:cl_deactivate_fs(4.106)[fs_umount:384] tail -1
+epprd_rg:cl_deactivate_fs(4.108)[fs_umount:384] read skip lv fs_type rest
+epprd_rg:cl_deactivate_fs(4.108)[fs_umount:384] IFS=:
+epprd_rg:cl_deactivate_fs(4.108)[fs_umount:387] : For WPARs, find the real file system name
+epprd_rg:cl_deactivate_fs(4.108)[fs_umount:389] [[ -n '' ]]
+epprd_rg:cl_deactivate_fs(4.108)[fs_umount:392] : Check to see if filesystem is mounted.
+epprd_rg:cl_deactivate_fs(4.109)[fs_umount:394] mount
+epprd_rg:cl_deactivate_fs(4.110)[fs_umount:394] LC_ALL=C
+epprd_rg:cl_deactivate_fs(4.111)[fs_umount:394] awk '{ if ( $1 == "/dev/sapdata1lv" ) print $2 }'
+epprd_rg:cl_deactivate_fs(4.116)[fs_umount:394] FS_MOUNTED=/oracle/EPP/sapdata1
+epprd_rg:cl_deactivate_fs(4.116)[fs_umount:395] [[ -n /oracle/EPP/sapdata1 ]]
+epprd_rg:cl_deactivate_fs(4.116)[fs_umount:397] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]]
+epprd_rg:cl_deactivate_fs(4.116)[fs_umount:409] [[ /oracle/EPP/sapdata1 == / ]]
+epprd_rg:cl_deactivate_fs(4.116)[fs_umount:409] [[ /oracle/EPP/sapdata1 == /usr ]]
+epprd_rg:cl_deactivate_fs(4.116)[fs_umount:409] [[ /oracle/EPP/sapdata1 == /dev ]]
+epprd_rg:cl_deactivate_fs(4.116)[fs_umount:409] [[ /oracle/EPP/sapdata1 == /proc ]]
+epprd_rg:cl_deactivate_fs(4.116)[fs_umount:409] [[ /oracle/EPP/sapdata1 == /var ]]
+epprd_rg:cl_deactivate_fs(4.116)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata1'
+epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log
+epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1
+epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime
+epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:36.967882
+epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:36.967882|INFO: Deactivating Filesystem|/oracle/EPP/sapdata1'
+epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
+epprd_rg:cl_deactivate_fs(4.146)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/sapdata1
+epprd_rg:cl_deactivate_fs(4.146)[fs_umount:429] (( count=1))
+epprd_rg:cl_deactivate_fs(4.146)[fs_umount:429] (( count <= 60))
+epprd_rg:cl_deactivate_fs(4.146)[fs_umount:432] date '+%h %d %H:%M:%S.000'
+epprd_rg:cl_deactivate_fs(4.148)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 19:42:36.000
+epprd_rg:cl_deactivate_fs(4.148)[fs_umount:434] umount /oracle/EPP/sapdata1
+epprd_rg:cl_deactivate_fs(4.219)[fs_umount:437] : Unmount of /oracle/EPP/sapdata1 worked. Can stop now.
+epprd_rg:cl_deactivate_fs(4.219)[fs_umount:439] break
+epprd_rg:cl_deactivate_fs(4.219)[fs_umount:672] [[ 0 != 0 ]]
+epprd_rg:cl_deactivate_fs(4.219)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata1'
+epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log
+epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1
+epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime
+epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:37.071052
+epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:37.071052|INFO: Deactivating Filesystem|/oracle/EPP/sapdata1'
+epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
+epprd_rg:cl_deactivate_fs(4.249)[fs_umount:685] : append status to the status file
+epprd_rg:cl_deactivate_fs(4.249)[fs_umount:687] print -- 0 /dev/sapdata1lv /oracle/EPP/sapdata1
+epprd_rg:cl_deactivate_fs(4.249)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp
+epprd_rg:cl_deactivate_fs(4.249)[fs_umount:691] return 0
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(4.270)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(4.270)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(4.270)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(4.270)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(4.270)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(4.270)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(4.270)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(4.270)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(4.270)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(4.270)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(4.270)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(4.270)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(4.270)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(4.270)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(4.270)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(4.270)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(4.270)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(4.272)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(4.272)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(4.276)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(4.276)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(4.276)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(4.276)[fs_umount:367] lsfs -c /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(4.280)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogB:/dev/origlogBlv:jfs2:::10485760:rw:no:no' 
+epprd_rg:cl_deactivate_fs(4.280)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(4.281)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogB:/dev/origlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(4.281)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(4.281)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(4.282)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(4.284)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(4.284)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(4.284)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(4.286)[fs_umount:394] awk '{ if ( $1 == "/dev/origlogBlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(4.286)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(4.286)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(4.290)[fs_umount:394] FS_MOUNTED=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(4.290)[fs_umount:395] [[ -n /oracle/EPP/origlogB ]] +epprd_rg:cl_deactivate_fs(4.290)[fs_umount:397] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:cl_deactivate_fs(4.290)[fs_umount:409] [[ /oracle/EPP/origlogB == / ]] +epprd_rg:cl_deactivate_fs(4.290)[fs_umount:409] [[ /oracle/EPP/origlogB == /usr ]] +epprd_rg:cl_deactivate_fs(4.290)[fs_umount:409] [[ /oracle/EPP/origlogB == /dev ]] +epprd_rg:cl_deactivate_fs(4.290)[fs_umount:409] [[ /oracle/EPP/origlogB == /proc ]] +epprd_rg:cl_deactivate_fs(4.290)[fs_umount:409] [[ /oracle/EPP/origlogB == /var ]] +epprd_rg:cl_deactivate_fs(4.290)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:37.142267 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:37.142267|INFO: Deactivating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(4.320)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(4.320)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(4.320)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(4.320)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(4.323)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 19:42:37.000 +epprd_rg:cl_deactivate_fs(4.323)[fs_umount:434] umount /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(4.393)[fs_umount:437] : Unmount of /oracle/EPP/origlogB worked. Can stop now. 
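Annotation: each pass resolves the mount point to its logical volume by parsing the colon-delimited output of lsfs -c, exactly as in the trace: tail -1 drops the #MountPoint:Device:Vfs:... header line, and an IFS=: read splits the remaining record. A self-contained ksh sketch (ksh93, as on AIX, runs the last pipeline element in the current shell, so the read populates these variables):

    fs=/oracle/EPP/origlogB                            # example from the trace
    lsfs -c "$fs" | tail -1 | IFS=: read skip lv fs_type rest
    print -- "$fs is $fs_type on logical volume $lv"   # e.g. jfs2 on /dev/origlogBlv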
+epprd_rg:cl_deactivate_fs(4.394)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(4.394)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(4.394)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:37.246101 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:37.246101|INFO: Deactivating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(4.424)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(4.424)[fs_umount:687] print -- 0 /dev/origlogBlv /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(4.424)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(4.424)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogA[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogA[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogA[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/origlogA cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(4.424)[fs_umount:313] FS=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(4.424)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(4.424)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(4.424)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(4.424)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(4.424)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(4.424)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(4.445)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(4.445)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(4.445)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(4.445)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(4.445)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(4.445)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(4.445)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(4.445)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(4.445)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(4.445)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(4.445)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(4.445)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(4.445)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(4.445)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(4.445)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(4.445)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(4.445)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(4.447)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(4.447)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(4.451)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(4.451)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(4.451)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(4.451)[fs_umount:367] lsfs -c /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(4.455)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogA:/dev/origlogAlv:jfs2:::10485760:rw:no:no' 
+epprd_rg:cl_deactivate_fs(4.455)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(4.456)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogA:/dev/origlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(4.457)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(4.456)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(4.457)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(4.459)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(4.459)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(4.459)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(4.461)[fs_umount:394] awk '{ if ( $1 == "/dev/origlogAlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(4.461)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(4.461)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(4.465)[fs_umount:394] FS_MOUNTED=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(4.465)[fs_umount:395] [[ -n /oracle/EPP/origlogA ]] +epprd_rg:cl_deactivate_fs(4.465)[fs_umount:397] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:cl_deactivate_fs(4.465)[fs_umount:409] [[ /oracle/EPP/origlogA == / ]] +epprd_rg:cl_deactivate_fs(4.465)[fs_umount:409] [[ /oracle/EPP/origlogA == /usr ]] +epprd_rg:cl_deactivate_fs(4.465)[fs_umount:409] [[ /oracle/EPP/origlogA == /dev ]] +epprd_rg:cl_deactivate_fs(4.465)[fs_umount:409] [[ /oracle/EPP/origlogA == /proc ]] +epprd_rg:cl_deactivate_fs(4.465)[fs_umount:409] [[ /oracle/EPP/origlogA == /var ]] +epprd_rg:cl_deactivate_fs(4.465)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:37.317086 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:37.317086|INFO: Deactivating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(4.495)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(4.495)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(4.495)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(4.495)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(4.498)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 19:42:37.000 +epprd_rg:cl_deactivate_fs(4.498)[fs_umount:434] umount /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(4.569)[fs_umount:437] : Unmount of /oracle/EPP/origlogA worked. Can stop now. 
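Annotation: before unmounting, fs_umount probes the mount table twice (script lines 332 and 394 in the trace): once to detect an NFS mount, which would be handled separately, and once to confirm the logical volume is actually mounted. On AIX, remote mounts carry the node name in column 1 of mount output, so the mounted-over directory is $3 and the vfs type is $4; for local mounts the device is $1 and the mount point is $2. A sketch with the device passed as an awk variable (the trace hardcodes it in the awk program):

    fs=/oracle/EPP/origlogA
    lv=/dev/origlogAlv
    fs_type=$(mount | awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS="$fs")
    [[ $fs_type == nfs* ]] && print -- "$fs is NFS-mounted"
    FS_MOUNTED=$(LC_ALL=C mount | awk '$1 == DEV { print $2 }' DEV="$lv")
    [[ -n $FS_MOUNTED ]] && print -- "$lv is mounted over $FS_MOUNTED"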
+epprd_rg:cl_deactivate_fs(4.569)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(4.569)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(4.569)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:37.421284 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:37.421284|INFO: Deactivating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(4.599)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(4.599)[fs_umount:687] print -- 0 /dev/origlogAlv /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(4.599)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(4.599)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/oraarch[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs:/oracle/EPP/oraarch[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/oraarch[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/oraarch cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(4.599)[fs_umount:313] FS=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(4.599)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(4.599)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(4.599)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(4.599)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(4.599)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(4.599)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(4.620)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(4.620)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(4.620)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(4.620)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(4.620)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(4.620)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(4.620)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(4.620)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(4.620)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(4.620)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(4.620)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(4.620)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(4.620)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(4.620)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(4.620)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(4.620)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(4.620)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(4.622)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(4.622)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(4.627)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(4.627)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(4.627)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(4.627)[fs_umount:367] lsfs -c /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(4.630)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/oraarch:/dev/oraarchlv:jfs2:::104857600:rw:no:no' 
+epprd_rg:cl_deactivate_fs(4.630)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(4.631)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/oraarch:/dev/oraarchlv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(4.632)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(4.632)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(4.632)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(4.634)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(4.634)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(4.634)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(4.636)[fs_umount:394] awk '{ if ( $1 == "/dev/oraarchlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(4.636)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(4.636)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(4.640)[fs_umount:394] FS_MOUNTED=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(4.640)[fs_umount:395] [[ -n /oracle/EPP/oraarch ]] +epprd_rg:cl_deactivate_fs(4.640)[fs_umount:397] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:cl_deactivate_fs(4.640)[fs_umount:409] [[ /oracle/EPP/oraarch == / ]] +epprd_rg:cl_deactivate_fs(4.640)[fs_umount:409] [[ /oracle/EPP/oraarch == /usr ]] +epprd_rg:cl_deactivate_fs(4.640)[fs_umount:409] [[ /oracle/EPP/oraarch == /dev ]] +epprd_rg:cl_deactivate_fs(4.640)[fs_umount:409] [[ /oracle/EPP/oraarch == /proc ]] +epprd_rg:cl_deactivate_fs(4.640)[fs_umount:409] [[ /oracle/EPP/oraarch == /var ]] +epprd_rg:cl_deactivate_fs(4.640)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:37.492036 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:37.492036|INFO: Deactivating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(4.670)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(4.670)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(4.670)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(4.670)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(4.673)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 19:42:37.000 +epprd_rg:cl_deactivate_fs(4.673)[fs_umount:434] umount /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(4.743)[fs_umount:437] : Unmount of /oracle/EPP/oraarch worked. Can stop now. 
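Annotation: the clwparroot block repeated before every fs_umount pass checks whether the resource group runs inside a WPAR. Here the clodmget query for a WPAR_NAME resource returns nothing, so clwparroot exits 0 with no output, WPAR_ROOT stays empty, and the script operates on the global mount table. A hedged sketch of the caller's side of that check (the if branch and the path remapping are assumptions; only the empty-result behavior is taken from the trace):

    WPAR_ROOT=$(clwparroot epprd_rg)    # empty when no WPAR is configured
    if [[ -n $WPAR_ROOT ]]
    then
        FS=${WPAR_ROOT}${FS}            # assumed remap to the WPAR-relative path
    fi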
+epprd_rg:cl_deactivate_fs(4.743)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(4.743)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(4.743)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:37.595676 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:37.595676|INFO: Deactivating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(4.773)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(4.773)[fs_umount:687] print -- 0 /dev/oraarchlv /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(4.773)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(4.773)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogB[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogB[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogB[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/mirrlogB cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(4.774)[fs_umount:313] FS=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(4.774)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(4.774)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(4.774)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(4.774)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(4.774)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(4.774)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(4.794)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(4.795)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(4.795)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(4.795)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(4.795)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(4.795)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(4.795)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(4.795)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(4.795)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(4.795)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(4.795)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(4.795)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(4.795)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(4.795)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(4.795)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(4.795)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(4.795)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(4.796)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(4.796)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(4.801)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(4.801)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(4.801)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(4.801)[fs_umount:367] lsfs -c /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(4.804)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogB:/dev/mirrlogBlv:jfs2:::10485760:rw:no:no' 
+epprd_rg:cl_deactivate_fs(4.804)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(4.806)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogB:/dev/mirrlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(4.807)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(4.806)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(4.807)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(4.809)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(4.809)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(4.809)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(4.810)[fs_umount:394] awk '{ if ( $1 == "/dev/mirrlogBlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(4.810)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(4.811)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(4.815)[fs_umount:394] FS_MOUNTED=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(4.815)[fs_umount:395] [[ -n /oracle/EPP/mirrlogB ]] +epprd_rg:cl_deactivate_fs(4.815)[fs_umount:397] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:cl_deactivate_fs(4.815)[fs_umount:409] [[ /oracle/EPP/mirrlogB == / ]] +epprd_rg:cl_deactivate_fs(4.815)[fs_umount:409] [[ /oracle/EPP/mirrlogB == /usr ]] +epprd_rg:cl_deactivate_fs(4.815)[fs_umount:409] [[ /oracle/EPP/mirrlogB == /dev ]] +epprd_rg:cl_deactivate_fs(4.815)[fs_umount:409] [[ /oracle/EPP/mirrlogB == /proc ]] +epprd_rg:cl_deactivate_fs(4.815)[fs_umount:409] [[ /oracle/EPP/mirrlogB == /var ]] +epprd_rg:cl_deactivate_fs(4.815)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:37.666870 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:37.666870|INFO: Deactivating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(4.844)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(4.845)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(4.845)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(4.845)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(4.847)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 19:42:37.000 +epprd_rg:cl_deactivate_fs(4.848)[fs_umount:434] umount /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(4.918)[fs_umount:437] : Unmount of /oracle/EPP/mirrlogB worked. Can stop now. 
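Annotation: every unmount is bracketed by amlog_trace calls that rotate clavailability.log via clcycle and append a timestamped record. cltime supplies the microsecond timestamp seen in the trace; the plain-ksh stand-in below produces the same record layout with whole-second resolution:

    LOGFILE=/var/hacmp/availability/clavailability.log   # path from the trace
    DATE=$(date '+%Y-%m-%dT%H:%M:%S')                    # cltime adds .microseconds
    print -- "|$DATE|INFO: Deactivating Filesystem|/oracle/EPP/mirrlogB" >> "$LOGFILE"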
+epprd_rg:cl_deactivate_fs(4.918)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(4.918)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(4.918)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:37.770147 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:37.770147|INFO: Deactivating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(4.948)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(4.948)[fs_umount:687] print -- 0 /dev/mirrlogBlv /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(4.948)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(4.948)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogA[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogA[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogA[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/mirrlogA cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(4.948)[fs_umount:313] FS=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(4.948)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(4.948)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(4.948)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(4.948)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(4.948)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(4.948)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(4.969)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(4.969)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(4.969)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(4.969)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(4.969)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(4.969)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(4.969)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(4.969)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(4.969)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(4.969)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(4.969)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(4.969)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(4.969)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(4.969)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(4.969)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(4.969)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(4.969)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(4.971)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(4.971)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(4.975)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(4.975)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(4.976)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(4.976)[fs_umount:367] lsfs -c /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(4.979)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogA:/dev/mirrlogAlv:jfs2:::10485760:rw:no:no' 
+epprd_rg:cl_deactivate_fs(4.979)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(4.980)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogA:/dev/mirrlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(4.981)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(4.981)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(4.981)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(4.983)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(4.983)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(4.983)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(4.985)[fs_umount:394] awk '{ if ( $1 == "/dev/mirrlogAlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(4.985)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(4.985)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(4.989)[fs_umount:394] FS_MOUNTED=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(4.989)[fs_umount:395] [[ -n /oracle/EPP/mirrlogA ]] +epprd_rg:cl_deactivate_fs(4.989)[fs_umount:397] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:cl_deactivate_fs(4.990)[fs_umount:409] [[ /oracle/EPP/mirrlogA == / ]] +epprd_rg:cl_deactivate_fs(4.990)[fs_umount:409] [[ /oracle/EPP/mirrlogA == /usr ]] +epprd_rg:cl_deactivate_fs(4.990)[fs_umount:409] [[ /oracle/EPP/mirrlogA == /dev ]] +epprd_rg:cl_deactivate_fs(4.990)[fs_umount:409] [[ /oracle/EPP/mirrlogA == /proc ]] +epprd_rg:cl_deactivate_fs(4.990)[fs_umount:409] [[ /oracle/EPP/mirrlogA == /var ]] +epprd_rg:cl_deactivate_fs(4.990)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:37.841469 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:37.841469|INFO: Deactivating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(5.019)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(5.019)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(5.019)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(5.019)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(5.022)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 19:42:37.000 +epprd_rg:cl_deactivate_fs(5.022)[fs_umount:434] umount /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(5.092)[fs_umount:437] : Unmount of /oracle/EPP/mirrlogA worked. Can stop now. 
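Annotation: on success, fs_umount appends a record of the form "0 <logical volume> <mount point>" to /tmp/epprd_rg_deactivate_fs.tmp and returns 0. The record format comes straight from the trace; the summary scan below is an illustrative assumption about how a caller could consume the file, not code lifted from cl_deactivate_fs:

    STATUSFILE=/tmp/epprd_rg_deactivate_fs.tmp
    print -- 0 /dev/mirrlogAlv /oracle/EPP/mirrlogA >> "$STATUSFILE"
    # a nonzero first field would mark a failed unmount:
    awk '$1 != 0 { bad++ } END { exit (bad > 0) }' "$STATUSFILE" ||
        print -u2 'ERROR: one or more filesystems failed to unmount'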
+epprd_rg:cl_deactivate_fs(5.092)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(5.093)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(5.093)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:37.944982 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:37.944982|INFO: Deactivating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(5.123)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(5.123)[fs_umount:687] print -- 0 /dev/mirrlogAlv /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(5.123)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(5.123)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP +epprd_rg:cl_deactivate_fs:/oracle/EPP[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP[deactivate_fs_process_resources:770] fs_umount /oracle/EPP cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(5.123)[fs_umount:313] FS=/oracle/EPP +epprd_rg:cl_deactivate_fs(5.123)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(5.123)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(5.123)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(5.123)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(5.123)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(5.123)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(5.144)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(5.144)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(5.144)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(5.144)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(5.144)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(5.144)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(5.144)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(5.144)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(5.144)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(5.144)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(5.144)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(5.144)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(5.144)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(5.144)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(5.144)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(5.144)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(5.144)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(5.146)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP +epprd_rg:cl_deactivate_fs(5.146)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(5.150)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(5.150)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(5.150)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(5.150)[fs_umount:367] lsfs -c /oracle/EPP +epprd_rg:cl_deactivate_fs(5.154)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP:/dev/epplv:jfs2:::62914560:rw:no:no' +epprd_rg:cl_deactivate_fs(5.154)[fs_umount:382] : 
Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(5.155)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP:/dev/epplv:jfs2:::62914560:rw:no:no' +epprd_rg:cl_deactivate_fs(5.156)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(5.155)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(5.155)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(5.158)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(5.158)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(5.158)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(5.160)[fs_umount:394] awk '{ if ( $1 == "/dev/epplv" ) print $2 }' +epprd_rg:cl_deactivate_fs(5.160)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(5.160)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(5.164)[fs_umount:394] FS_MOUNTED=/oracle/EPP +epprd_rg:cl_deactivate_fs(5.164)[fs_umount:395] [[ -n /oracle/EPP ]] +epprd_rg:cl_deactivate_fs(5.164)[fs_umount:397] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:cl_deactivate_fs(5.164)[fs_umount:409] [[ /oracle/EPP == / ]] +epprd_rg:cl_deactivate_fs(5.164)[fs_umount:409] [[ /oracle/EPP == /usr ]] +epprd_rg:cl_deactivate_fs(5.164)[fs_umount:409] [[ /oracle/EPP == /dev ]] +epprd_rg:cl_deactivate_fs(5.164)[fs_umount:409] [[ /oracle/EPP == /proc ]] +epprd_rg:cl_deactivate_fs(5.164)[fs_umount:409] [[ /oracle/EPP == /var ]] +epprd_rg:cl_deactivate_fs(5.164)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:38.015978 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:38.015978|INFO: Deactivating Filesystem|/oracle/EPP' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(5.194)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP +epprd_rg:cl_deactivate_fs(5.194)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(5.194)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(5.194)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(5.197)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 19:42:38.000 +epprd_rg:cl_deactivate_fs(5.197)[fs_umount:434] umount /oracle/EPP +epprd_rg:cl_deactivate_fs(5.457)[fs_umount:437] : Unmount of /oracle/EPP worked. Can stop now. 
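Annotation: note the unmount order across this sequence: the deepest mount points (/oracle/EPP/sapdata1 and the log directories) come down first, then /oracle/EPP just above, and finally /oracle below. Children must be unmounted before their parents, since a parent filesystem cannot be unmounted while anything is still mounted beneath it. One way to derive that order is to sort mount points deepest-first; the sketch below is an illustration of the idea, not the actual cl_deactivate_fs ordering logic:

    ALL_FS=$'/oracle\n/oracle/EPP\n/oracle/EPP/mirrlogA'     # example set
    print -- "$ALL_FS" |
        awk '{ print gsub("/","/"), $0 }' |                  # prefix each path with its depth
        sort -rn |                                           # deepest first
        while read depth fs
        do
            print -- "umount $fs"                            # stand-in for the real unmount
        done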
+epprd_rg:cl_deactivate_fs(5.457)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(5.457)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(5.457)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:38.308531 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:38.308531|INFO: Deactivating Filesystem|/oracle/EPP' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(5.486)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(5.486)[fs_umount:687] print -- 0 /dev/epplv /oracle/EPP +epprd_rg:cl_deactivate_fs(5.486)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(5.486)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle[deactivate_fs_process_resources:764] PS4_LOOP=/oracle +epprd_rg:cl_deactivate_fs:/oracle[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle[deactivate_fs_process_resources:770] fs_umount /oracle cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(5.486)[fs_umount:313] FS=/oracle +epprd_rg:cl_deactivate_fs(5.486)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(5.486)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(5.486)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(5.486)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(5.486)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(5.486)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(5.507)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(5.507)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(5.507)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(5.507)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(5.507)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(5.507)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(5.507)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(5.507)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(5.507)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(5.507)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(5.507)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(5.507)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(5.507)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(5.507)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(5.507)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(5.507)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(5.507)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(5.509)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle +epprd_rg:cl_deactivate_fs(5.509)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(5.513)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(5.513)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(5.513)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(5.513)[fs_umount:367] lsfs -c /oracle +epprd_rg:cl_deactivate_fs(5.517)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle:/dev/oraclelv:jfs2:::41943040:rw:no:no' +epprd_rg:cl_deactivate_fs(5.517)[fs_umount:382] : Get the 
logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(5.518)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle:/dev/oraclelv:jfs2:::41943040:rw:no:no' +epprd_rg:cl_deactivate_fs(5.519)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(5.519)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(5.519)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(5.521)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(5.521)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(5.521)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(5.523)[fs_umount:394] awk '{ if ( $1 == "/dev/oraclelv" ) print $2 }' +epprd_rg:cl_deactivate_fs(5.522)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(5.523)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(5.527)[fs_umount:394] FS_MOUNTED=/oracle +epprd_rg:cl_deactivate_fs(5.527)[fs_umount:395] [[ -n /oracle ]] +epprd_rg:cl_deactivate_fs(5.527)[fs_umount:397] [[ /oracle != /oracle ]] +epprd_rg:cl_deactivate_fs(5.527)[fs_umount:409] [[ /oracle == / ]] +epprd_rg:cl_deactivate_fs(5.527)[fs_umount:409] [[ /oracle == /usr ]] +epprd_rg:cl_deactivate_fs(5.527)[fs_umount:409] [[ /oracle == /dev ]] +epprd_rg:cl_deactivate_fs(5.527)[fs_umount:409] [[ /oracle == /proc ]] +epprd_rg:cl_deactivate_fs(5.527)[fs_umount:409] [[ /oracle == /var ]] +epprd_rg:cl_deactivate_fs(5.527)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:38.378637 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:38.378637|INFO: Deactivating Filesystem|/oracle' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(5.556)[fs_umount:427] : Try up to 60 times to unmount /oracle +epprd_rg:cl_deactivate_fs(5.556)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(5.556)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(5.556)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(5.559)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 19:42:38.000 +epprd_rg:cl_deactivate_fs(5.559)[fs_umount:434] umount /oracle +epprd_rg:cl_deactivate_fs(5.634)[fs_umount:437] : Unmount of /oracle worked. Can stop now. 
+epprd_rg:cl_deactivate_fs(5.634)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(5.634)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(5.634)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:38.486281 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:38.486281|INFO: Deactivating Filesystem|/oracle' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(5.664)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(5.664)[fs_umount:687] print -- 0 /dev/oraclelv /oracle +epprd_rg:cl_deactivate_fs(5.664)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(5.664)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/board_org[deactivate_fs_process_resources:764] PS4_LOOP=/board_org +epprd_rg:cl_deactivate_fs:/board_org[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/board_org[deactivate_fs_process_resources:770] fs_umount /board_org cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(5.664)[fs_umount:313] FS=/board_org +epprd_rg:cl_deactivate_fs(5.664)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(5.664)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(5.664)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(5.664)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(5.664)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(5.664)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(5.685)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(5.685)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(5.685)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(5.685)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(5.685)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(5.685)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(5.685)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(5.685)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(5.685)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(5.685)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(5.685)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(5.685)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(5.685)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(5.685)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(5.685)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(5.685)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(5.685)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(5.687)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/board_org +epprd_rg:cl_deactivate_fs(5.687)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(5.691)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(5.691)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(5.691)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(5.691)[fs_umount:367] lsfs -c /board_org +epprd_rg:cl_deactivate_fs(5.694)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(5.694)[fs_umount:382] : 
Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(5.696)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(5.697)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(5.697)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(5.697)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(5.698)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(5.698)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(5.698)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(5.700)[fs_umount:394] awk '{ if ( $1 == "/dev/boardlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(5.700)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(5.700)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(5.705)[fs_umount:394] FS_MOUNTED=/board_org +epprd_rg:cl_deactivate_fs(5.705)[fs_umount:395] [[ -n /board_org ]] +epprd_rg:cl_deactivate_fs(5.705)[fs_umount:397] [[ /board_org != /board_org ]] +epprd_rg:cl_deactivate_fs(5.705)[fs_umount:409] [[ /board_org == / ]] +epprd_rg:cl_deactivate_fs(5.705)[fs_umount:409] [[ /board_org == /usr ]] +epprd_rg:cl_deactivate_fs(5.705)[fs_umount:409] [[ /board_org == /dev ]] +epprd_rg:cl_deactivate_fs(5.705)[fs_umount:409] [[ /board_org == /proc ]] +epprd_rg:cl_deactivate_fs(5.705)[fs_umount:409] [[ /board_org == /var ]] +epprd_rg:cl_deactivate_fs(5.705)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/board_org' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:38.555893 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:38.555893|INFO: Deactivating Filesystem|/board_org' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(5.734)[fs_umount:427] : Try up to 60 times to unmount /board_org +epprd_rg:cl_deactivate_fs(5.734)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(5.734)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(5.734)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(5.736)[fs_umount:432] : Attempt 1 of 60 to unmount at Jan 28 19:42:38.000 +epprd_rg:cl_deactivate_fs(5.736)[fs_umount:434] umount /board_org +epprd_rg:cl_deactivate_fs(5.806)[fs_umount:437] : Unmount of /board_org worked. Can stop now. 
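
Both unmount passes recover the backing device the same way: lsfs -c emits a header plus one colon-separated record, and piping the last line into read splits it in the current shell (ksh runs the final pipeline stage in the calling process, so lv and fs_type survive the pipe). A condensed sketch, using /board_org from the trace:

    FS=/board_org
    lv_lsfs=$(lsfs -c $FS)          # header line plus one colon-separated record
    print -- "$lv_lsfs" | tail -1 | IFS=: read skip lv fs_type rest
    print -- "$lv $fs_type"         # -> /dev/boardlv jfs2

    # mount check: print the mount point only if that device is mounted
    FS_MOUNTED=$(mount | LC_ALL=C awk '$1 == DEV { print $2 }' DEV=$lv)
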
+epprd_rg:cl_deactivate_fs(5.806)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(5.806)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(5.806)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/board_org' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-01-28T19:42:38.658025 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-01-28T19:42:38.658025|INFO: Deactivating Filesystem|/board_org' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(5.836)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(5.836)[fs_umount:687] print -- 0 /dev/boardlv /board_org +epprd_rg:cl_deactivate_fs(5.836)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(5.836)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/board_org[deactivate_fs_process_resources:773] unset PS4_LOOP +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:777] [[ -n '' ]] +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:786] ALLNOERROR=All_non_error_filesystems +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:788] : update resource manager +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:790] cl_RMupdate resource_down All_non_error_filesystems cl_deactivate_fs 2023-01-28T19:42:38.681437 2023-01-28T19:42:38.685955 +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:794] : Check to see how the unmounts went +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:796] [[ -s /tmp/epprd_rg_deactivate_fs.tmp ]] +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:798] grep -qw ^1 /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:805] grep -qw ^11 /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:814] : All unmounts successful +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:816] STATUS=0 +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:817] rm -f /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:821] return 0 +epprd_rg:cl_deactivate_fs[924] exit 0 +epprd_rg:process_resources[process_file_systems:2668] RC=0 +epprd_rg:process_resources[process_file_systems:2669] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[process_file_systems:2681] (( 0 != 0 )) +epprd_rg:process_resources[process_file_systems:2687] return 0 +epprd_rg:process_resources[3483] RC=0 +epprd_rg:process_resources[3485] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3487] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:42:38.708628 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=VGS ACTION=RELEASE VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' EXPORT_FILESYSTEM='"TRUE"' +epprd_rg:process_resources[1] JOB_TYPE=VGS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] VOLUME_GROUPS=datavg 
+epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] EXPORT_FILESYSTEM=TRUE +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ VGS == RELEASE ]] +epprd_rg:process_resources[3360] [[ VGS == ONLINE ]] +epprd_rg:process_resources[3571] process_volume_groups_main RELEASE +epprd_rg:process_resources[process_volume_groups_main:2293] PS4_FUNC=process_volume_groups_main +epprd_rg:process_resources[process_volume_groups_main:2293] typeset PS4_FUNC +epprd_rg:process_resources[process_volume_groups_main:2294] [[ high == high ]] +epprd_rg:process_resources[process_volume_groups_main:2294] set -x +epprd_rg:process_resources[process_volume_groups_main:2295] DEF_VARYON_ACTION=0 +epprd_rg:process_resources[process_volume_groups_main:2295] typeset -li DEF_VARYON_ACTION +epprd_rg:process_resources[process_volume_groups_main:2296] FAILURE_IN_METHOD=0 +epprd_rg:process_resources[process_volume_groups_main:2296] typeset -li FAILURE_IN_METHOD +epprd_rg:process_resources[process_volume_groups_main:2297] ACTION=RELEASE +epprd_rg:process_resources[process_volume_groups_main:2297] typeset ACTION +epprd_rg:process_resources[process_volume_groups_main:2298] STAT=0 +epprd_rg:process_resources[process_volume_groups_main:2299] VG_LIST=datavg +epprd_rg:process_resources[process_volume_groups_main:2300] RG_LIST=epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2304] getReplicatedResources epprd_rg +epprd_rg:process_resources[getReplicatedResources:699] PS4_FUNC=getReplicatedResources +epprd_rg:process_resources[getReplicatedResources:699] typeset PS4_FUNC +epprd_rg:process_resources[getReplicatedResources:700] [[ high == high ]] +epprd_rg:process_resources[getReplicatedResources:700] set -x +epprd_rg:process_resources[getReplicatedResources:702] RV=false +epprd_rg:process_resources[getReplicatedResources:704] clodmget -n -f type HACMPrresmethods +epprd_rg:process_resources[getReplicatedResources:704] [[ -n 9 ]] +epprd_rg:process_resources[getReplicatedResources:707] : Replicated resource methods are defined, check for resources +epprd_rg:process_resources[getReplicatedResources:709] clodmget -q $'name like \'*_REP_RESOURCE\' AND group=epprd_rg' -f value -n HACMPresource +epprd_rg:process_resources[getReplicatedResources:709] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:718] : Verify if any backup profiles are configured and trigger cbm utilities based on that +epprd_rg:process_resources[getReplicatedResources:720] clodmget -q name=BACKUP_ENABLED -f value HACMPresource +epprd_rg:process_resources[getReplicatedResources:720] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:739] echo false +epprd_rg:process_resources[process_volume_groups_main:2304] REPLICATED_RESOURCES=false +epprd_rg:process_resources[process_volume_groups_main:2305] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2306] print -- datavg +epprd_rg:process_resources[process_volume_groups_main:2306] read VOLUME_GROUPS VG_LIST +epprd_rg:process_resources[process_volume_groups_main:2306] IFS=: +epprd_rg:process_resources[process_volume_groups_main:2307] VOLUME_GROUPS=datavg 
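
The JOB_TYPE=VGS block above is the visible half of the dispatcher handshake: clRGPA prints a line of shell assignments, and process_resources evals them under allexport so each job's parameters land in the environment. A sketch of that loop, with the termination details assumed (the (( 0 != 0 )) test at line 3333 is the RC check; a terminal job type such as NONE is assumed to end the loop):

    while true
    do
        : call rgpa, and it will tell us what to do next
        set -a                  # allexport: evaled assignments become environment variables
        eval $(clRGPA)          # e.g. JOB_TYPE=VGS ACTION=RELEASE VOLUME_GROUPS="datavg" ...
        RC=$?
        set +a
        (( RC != 0 )) && break              # agent failure ends the dispatch (assumed)
        [[ $JOB_TYPE == NONE ]] && break    # assumed terminator job type
        # ... dispatch on $JOB_TYPE: VGS, SERVICE_LABELS, FILESYSTEMS, ...
    done
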
+epprd_rg:process_resources[process_volume_groups_main:2310] : At this point, these variables contain information only for epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2312] export VOLUME_GROUPS +epprd_rg:process_resources[process_volume_groups_main:2313] export RESOURCE_GROUPS +epprd_rg:process_resources[process_volume_groups_main:2315] [[ false == true ]] +epprd_rg:process_resources[process_volume_groups_main:2555] process_volume_groups RELEASE +epprd_rg:process_resources[process_volume_groups:2571] PS4_FUNC=process_volume_groups +epprd_rg:process_resources[process_volume_groups:2571] typeset PS4_FUNC +epprd_rg:process_resources[process_volume_groups:2572] [[ high == high ]] +epprd_rg:process_resources[process_volume_groups:2572] set -x +epprd_rg:process_resources[process_volume_groups:2573] STAT=0 +epprd_rg:process_resources[process_volume_groups:2575] GROUPNAME=epprd_rg +epprd_rg:process_resources[process_volume_groups:2575] export GROUPNAME +epprd_rg:process_resources[process_volume_groups:2578] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[process_volume_groups:2603] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[process_volume_groups:2605] cl_deactivate_vgs -n +epprd_rg:cl_deactivate_vgs[458] version=%I% +epprd_rg:cl_deactivate_vgs[461] STATUS=0 +epprd_rg:cl_deactivate_vgs[461] typeset -li STATUS +epprd_rg:cl_deactivate_vgs[462] TMP_VARYOFF_STATUS=/tmp/_deactivate_vgs.tmp +epprd_rg:cl_deactivate_vgs[463] sddsrv_off=FALSE +epprd_rg:cl_deactivate_vgs[464] ALLVGS=All_volume_groups +epprd_rg:cl_deactivate_vgs[465] OEM_CALL=false +epprd_rg:cl_deactivate_vgs[467] (( 1 != 0 )) +epprd_rg:cl_deactivate_vgs[467] [[ -n == -c ]] +epprd_rg:cl_deactivate_vgs[476] EVENT_TYPE=RELEASE_PRIMARY +epprd_rg:cl_deactivate_vgs[477] EVENT_TYPE=RELEASE_PRIMARY +epprd_rg:cl_deactivate_vgs[480] : if JOB_TYPE is set and is not $'\'GROUP\',' then process_resources is parent +epprd_rg:cl_deactivate_vgs[482] [[ VGS != 0 ]] +epprd_rg:cl_deactivate_vgs[482] [[ VGS != GROUP ]] +epprd_rg:cl_deactivate_vgs[485] : parameters passed from process_resources thru environment +epprd_rg:cl_deactivate_vgs[487] PROC_RES=true +epprd_rg:cl_deactivate_vgs[501] : set -u will report an error if any variable used in the script is not set +epprd_rg:cl_deactivate_vgs[503] set -u +epprd_rg:cl_deactivate_vgs[506] : Remove the status file if it currently exists +epprd_rg:cl_deactivate_vgs[508] rm -f /tmp/_deactivate_vgs.tmp +epprd_rg:cl_deactivate_vgs[511] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_deactivate_vgs[512] : to allow reliable comparisons. 
E.g., maximum VRMF is +epprd_rg:cl_deactivate_vgs[513] : 99.99.999.999 +epprd_rg:cl_deactivate_vgs[515] typeset -li V R M F +epprd_rg:cl_deactivate_vgs[516] typeset -Z2 R +epprd_rg:cl_deactivate_vgs[517] typeset -Z3 M +epprd_rg:cl_deactivate_vgs[518] typeset -Z3 F +epprd_rg:cl_deactivate_vgs[519] VRMF=0 +epprd_rg:cl_deactivate_vgs[519] typeset -li VRMF +epprd_rg:cl_deactivate_vgs[528] ls '/dev/vpath*' +epprd_rg:cl_deactivate_vgs[528] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_vgs[595] : Special processing for 2-node NFS clusters +epprd_rg:cl_deactivate_vgs[597] TWO_NODE_CLUSTER=FALSE +epprd_rg:cl_deactivate_vgs[597] export TWO_NODE_CLUSTER +epprd_rg:cl_deactivate_vgs[598] FS_TYPES='jsf2?log' +epprd_rg:cl_deactivate_vgs[598] export FS_TYPES +epprd_rg:cl_deactivate_vgs[599] wc -l +epprd_rg:cl_deactivate_vgs[599] clodmget -q 'object = VERBOSE_LOGGING' -f name -n HACMPnode +epprd_rg:cl_deactivate_vgs[599] (( 2 == 2 )) +epprd_rg:cl_deactivate_vgs[600] [[ -n TRUE ]] +epprd_rg:cl_deactivate_vgs[602] : two nodes, with exported filesystems +epprd_rg:cl_deactivate_vgs[603] TWO_NODE_CLUSTER=TRUE +epprd_rg:cl_deactivate_vgs[603] export TWO_NODE_CLUSTER +epprd_rg:cl_deactivate_vgs[607] : Pick up a list of currently varied on volume groups +epprd_rg:cl_deactivate_vgs[609] lsvg -L -o +epprd_rg:cl_deactivate_vgs[609] 2> /tmp/lsvg.err +epprd_rg:cl_deactivate_vgs[609] VG_ON_LIST=$'datavg\ncaavg_private\nrootvg' +epprd_rg:cl_deactivate_vgs[612] : if not called from process_resources, use old-style environment and parameters +epprd_rg:cl_deactivate_vgs[614] [[ true == false ]] +epprd_rg:cl_deactivate_vgs[672] : Called from process_resources +epprd_rg:cl_deactivate_vgs[674] LIST_OF_VOLUME_GROUPS_FOR_RG='' +epprd_rg:cl_deactivate_vgs[679] export GROUPNAME +epprd_rg:cl_deactivate_vgs[681] : Discover the volume groups for this resource group. +epprd_rg:cl_deactivate_vgs[686] echo datavg +epprd_rg:cl_deactivate_vgs[686] read LIST_OF_VOLUME_GROUPS_FOR_RG VOLUME_GROUPS +epprd_rg:cl_deactivate_vgs[686] IFS=: +epprd_rg:cl_deactivate_vgs[689] : Reverse the order, so that VGs release in reverse order of acquisition +epprd_rg:cl_deactivate_vgs[693] sed 's/ /,/g' +epprd_rg:cl_deactivate_vgs[693] echo datavg +epprd_rg:cl_deactivate_vgs[693] LIST_OF_COMMASEP_VG_FOR_RG=datavg +epprd_rg:cl_deactivate_vgs[694] echo datavg +epprd_rg:cl_deactivate_vgs[695] tr , '\n' +epprd_rg:cl_deactivate_vgs[695] egrep -v -w $'rootvg|caavg_private\n |altinst_rootvg|old_rootvg' +epprd_rg:cl_deactivate_vgs[696] sort -ru +epprd_rg:cl_deactivate_vgs[694] LIST_OF_VOLUME_GROUPS_FOR_RG=datavg +epprd_rg:cl_deactivate_vgs[698] : Update Resource Manager - releasing VGs for this RG +epprd_rg:cl_deactivate_vgs[700] cl_RMupdate resource_releasing All_volume_groups cl_deactivate_vgs 2023-01-28T19:42:38.798451 2023-01-28T19:42:38.802944 +epprd_rg:cl_deactivate_vgs[703] : Process the volume groups for this resource group +epprd_rg:cl_deactivate_vgs:datavg[707] PS4_LOOP=datavg +epprd_rg:cl_deactivate_vgs:datavg[711] grep -qw datavg +epprd_rg:cl_deactivate_vgs:datavg[711] print datavg caavg_private rootvg +epprd_rg:cl_deactivate_vgs:datavg[719] : This VG is varied on, so go vary it off.
Get the VG mode first +epprd_rg:cl_deactivate_vgs:datavg[721] MODE=9999 +epprd_rg:cl_deactivate_vgs:datavg[722] /usr/sbin/getlvodm -v datavg +epprd_rg:cl_deactivate_vgs:datavg[722] VGID=00c44af100004b00000001851e9dc053 +epprd_rg:cl_deactivate_vgs:datavg[723] lqueryvg -g 00c44af100004b00000001851e9dc053 -X +epprd_rg:cl_deactivate_vgs:datavg[723] MODE=32 +epprd_rg:cl_deactivate_vgs:datavg[724] RC=0 +epprd_rg:cl_deactivate_vgs:datavg[725] (( 0 != 0 )) +epprd_rg:cl_deactivate_vgs:datavg[726] : exit status of lqueryvg -g 00c44af100004b00000001851e9dc053 -X: 0 +epprd_rg:cl_deactivate_vgs:datavg[728] vgs_varyoff datavg 32 +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:60] PS4_TIMER=true +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:60] typeset PS4_TIMER +epprd_rg:cl_deactivate_vgs(0.093):datavg[vgs_varyoff:61] [[ high == high ]] +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:61] set -x +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:63] VG=datavg +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:63] typeset VG +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:64] MODE=32 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:64] typeset MODE +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:66] OPEN_FSs='' +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:66] typeset OPEN_FSs +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:67] OPEN_LVs='' +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:67] typeset OPEN_LVs +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:68] typeset TMP_VG_LIST +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:69] TS_FLAGS='' +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:69] typeset TS_FLAGS +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:71] STATUS=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:71] typeset -li STATUS +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:72] RC=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:72] typeset -li RC +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:73] SELECTIVE_FAILOVER=false +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:73] typeset SELECTIVE_FAILOVER +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:74] typeset LV +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:75] lv_list='' +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:75] typeset lv_list +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:76] typeset FS +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:77] FS_MOUNTED='' +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:77] typeset FS_MOUNTED +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:79] rc_fuser=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:79] typeset -li rc_fuser +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:80] rc_varyonvg=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:80] typeset -li rc_varyonvg +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:81] rc_varyoffvg=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:81] typeset -li rc_varyoffvg +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:82] rc_lsvg=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:82] typeset -li rc_lsvg +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:83] rc_dfs=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:83] typeset -li rc_dfs +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:84] rc_dvg=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:84] typeset -li rc_dvg +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:88] typeset -li FV FR FM FF 
+epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:89] typeset -Z2 FR +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:90] typeset -Z3 FM +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:91] typeset -Z3 FF +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:92] FVRMF=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:92] typeset -li FVRMF +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:93] fuser_lvl=601004000 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:93] typeset -li fuser_lvl +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:95] lsvg -l -L datavg +epprd_rg:cl_deactivate_vgs(0.095):datavg[vgs_varyoff:95] 2> /dev/null +epprd_rg:cl_deactivate_vgs(0.115):datavg[vgs_varyoff:95] TMP_VG_LIST=$'datavg:\nLV NAME TYPE LPs PPs PVs LV STATE MOUNT POINT\nepprdaloglv jfs2log 1 1 1 closed/syncd N/A\nsaplv jfs2 100 100 7 closed/syncd /usr/sap\nsapmntlv jfs2 20 20 7 closed/syncd /sapmnt\noraclelv jfs2 40 40 7 closed/syncd /oracle\nepplv jfs2 60 60 7 closed/syncd /oracle/EPP\noraarchlv jfs2 100 100 7 closed/syncd /oracle/EPP/oraarch\nsapdata1lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata1\nsapdata2lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata2\nsapdata3lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata3\nsapdata4lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata4\nboardlv jfs2 10 10 7 closed/syncd /board_org\noriglogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogA\noriglogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogB\nmirrlogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogA\nmirrlogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_vgs(0.115):datavg[vgs_varyoff:96] rc_lsvg=0 +epprd_rg:cl_deactivate_vgs(0.115):datavg[vgs_varyoff:98] [[ RELEASE_PRIMARY == reconfig* ]] +epprd_rg:cl_deactivate_vgs(0.116):datavg[vgs_varyoff:114] [[ -n $'datavg:\nLV NAME TYPE LPs PPs PVs LV STATE MOUNT POINT\nepprdaloglv jfs2log 1 1 1 closed/syncd N/A\nsaplv jfs2 100 100 7 closed/syncd /usr/sap\nsapmntlv jfs2 20 20 7 closed/syncd /sapmnt\noraclelv jfs2 40 40 7 closed/syncd /oracle\nepplv jfs2 60 60 7 closed/syncd /oracle/EPP\noraarchlv jfs2 100 100 7 closed/syncd /oracle/EPP/oraarch\nsapdata1lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata1\nsapdata2lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata2\nsapdata3lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata3\nsapdata4lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata4\nboardlv jfs2 10 10 7 closed/syncd /board_org\noriglogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogA\noriglogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogB\nmirrlogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogA\nmirrlogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogB' ]] +epprd_rg:cl_deactivate_vgs(0.116):datavg[vgs_varyoff:117] : Get list of open logical volumes corresponding to filesystems +epprd_rg:cl_deactivate_vgs(0.117):datavg[vgs_varyoff:119] awk '$2 ~ /jfs2?$/ && $6 ~ /open/ {print $1}' +epprd_rg:cl_deactivate_vgs(0.117):datavg[vgs_varyoff:119] print $'datavg:\nLV NAME TYPE LPs PPs PVs LV STATE MOUNT POINT\nepprdaloglv jfs2log 1 1 1 closed/syncd N/A\nsaplv jfs2 100 100 7 closed/syncd /usr/sap\nsapmntlv jfs2 20 20 7 closed/syncd /sapmnt\noraclelv jfs2 40 40 7 closed/syncd /oracle\nepplv jfs2 60 60 7 closed/syncd /oracle/EPP\noraarchlv jfs2 100 100 7 closed/syncd /oracle/EPP/oraarch\nsapdata1lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata1\nsapdata2lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata2\nsapdata3lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata3\nsapdata4lv jfs2 100 100 7 closed/syncd 
/oracle/EPP/sapdata4\nboardlv jfs2 10 10 7 closed/syncd /board_org\noriglogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogA\noriglogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogB\nmirrlogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogA\nmirrlogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_vgs(0.121):datavg[vgs_varyoff:119] OPEN_LVs='' +epprd_rg:cl_deactivate_vgs(0.121):datavg[vgs_varyoff:122] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_vgs(0.121):datavg[vgs_varyoff:140] [[ -n '' ]] +epprd_rg:cl_deactivate_vgs(0.121):datavg[vgs_varyoff:167] [[ TRUE == TRUE ]] +epprd_rg:cl_deactivate_vgs(0.121):datavg[vgs_varyoff:170] : For two-node clusters, special processing for the highly available NFS +epprd_rg:cl_deactivate_vgs(0.121):datavg[vgs_varyoff:171] : server function: tell NFS to dump the dup cache into the jfslog or jfs2log +epprd_rg:cl_deactivate_vgs(0.121):datavg[vgs_varyoff:175] : Find the first log device in the saved list of logical volumes +epprd_rg:cl_deactivate_vgs(0.121):datavg[vgs_varyoff:177] pattern='jsf2?log' +epprd_rg:cl_deactivate_vgs(0.123):datavg[vgs_varyoff:178] awk '$2 ~ /jsf2?log/ {printf "/dev/%s\n", $1 ; exit}' +epprd_rg:cl_deactivate_vgs(0.123):datavg[vgs_varyoff:178] print $'datavg:\nLV NAME TYPE LPs PPs PVs LV STATE MOUNT POINT\nepprdaloglv jfs2log 1 1 1 closed/syncd N/A\nsaplv jfs2 100 100 7 closed/syncd /usr/sap\nsapmntlv jfs2 20 20 7 closed/syncd /sapmnt\noraclelv jfs2 40 40 7 closed/syncd /oracle\nepplv jfs2 60 60 7 closed/syncd /oracle/EPP\noraarchlv jfs2 100 100 7 closed/syncd /oracle/EPP/oraarch\nsapdata1lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata1\nsapdata2lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata2\nsapdata3lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata3\nsapdata4lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata4\nboardlv jfs2 10 10 7 closed/syncd /board_org\noriglogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogA\noriglogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogB\nmirrlogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogA\nmirrlogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_vgs(0.127):datavg[vgs_varyoff:178] logdev='' +epprd_rg:cl_deactivate_vgs(0.127):datavg[vgs_varyoff:180] [[ -z '' ]] +epprd_rg:cl_deactivate_vgs(0.127):datavg[vgs_varyoff:181] [[ true == true ]] +epprd_rg:cl_deactivate_vgs(0.127):datavg[vgs_varyoff:182] [[ ONLINE != ONLINE ]] +epprd_rg:cl_deactivate_vgs(0.127):datavg[vgs_varyoff:216] [[ -n '' ]] +epprd_rg:cl_deactivate_vgs(0.127):datavg[vgs_varyoff:223] : Finally, vary off the volume group +epprd_rg:cl_deactivate_vgs(0.127):datavg[vgs_varyoff:226] amlog_trace '' 'Deactivating Volume Group|datavg' +epprd_rg:cl_deactivate_vgs(0.127):datavg[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_vgs(0.128):datavg[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_vgs(0.152):datavg[amlog_trace:319] cltime +epprd_rg:cl_deactivate_vgs(0.155):datavg[amlog_trace:319] DATE=2023-01-28T19:42:38.890815 +epprd_rg:cl_deactivate_vgs(0.155):datavg[amlog_trace:320] echo '|2023-01-28T19:42:38.890815|INFO: Deactivating Volume Group|datavg' +epprd_rg:cl_deactivate_vgs(0.155):datavg[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_vgs(0.155):datavg[vgs_varyoff:228] [[ 32 == 32 ]] +epprd_rg:cl_deactivate_vgs(0.155):datavg[vgs_varyoff:231] : This VG is ECM. Move to passive mode. 
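
MODE=32 identifies datavg as an enhanced concurrent mode (ECM) volume group, so rather than a full varyoff the node is demoted to passive mode: it keeps its concurrent registration but gives up read/write access. A sketch of that branch, with the non-ECM fallback assumed:

    VG=datavg
    VGID=$(/usr/sbin/getlvodm -v $VG)       # VG identifier from the ODM
    MODE=$(lqueryvg -g $VGID -X)            # 32 == enhanced concurrent mode
    if (( MODE == 32 ))
    then
        varyonvg -c -n -P $VG 2>/dev/null   # concurrent, no sync, Passive mode
    else
        varyoffvg $VG                       # ordinary VG: plain varyoff (assumed path)
    fi
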
+epprd_rg:cl_deactivate_vgs(0.155):datavg[vgs_varyoff:244] TS_FLAGS=-o +epprd_rg:cl_deactivate_vgs(0.155):datavg[vgs_varyoff:245] cltime 2023-01-28T19:42:38.893555 +epprd_rg:cl_deactivate_vgs(0.158):datavg[vgs_varyoff:246] varyonvg -c -n -P datavg +epprd_rg:cl_deactivate_vgs(0.159):datavg[vgs_varyoff:246] 2> /dev/null +epprd_rg:cl_deactivate_vgs(0.413):datavg[vgs_varyoff:247] rc_varyonvg=0 +epprd_rg:cl_deactivate_vgs(0.413):datavg[vgs_varyoff:248] : return code from varyonvg -c -n -P datavg is 0 +epprd_rg:cl_deactivate_vgs(0.413):datavg[vgs_varyoff:249] cltime 2023-01-28T19:42:39.151660 +epprd_rg:cl_deactivate_vgs(0.416):datavg[vgs_varyoff:250] (( 0 != 0 )) +epprd_rg:cl_deactivate_vgs(0.416):datavg[vgs_varyoff:277] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_vgs(0.416):datavg[vgs_varyoff:281] amlog_trace '' 'Deactivating Volume Group|datavg' +epprd_rg:cl_deactivate_vgs(0.416):datavg[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_vgs(0.417):datavg[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_vgs(0.442):datavg[amlog_trace:319] cltime +epprd_rg:cl_deactivate_vgs(0.444):datavg[amlog_trace:319] DATE=2023-01-28T19:42:39.180123 +epprd_rg:cl_deactivate_vgs(0.444):datavg[amlog_trace:320] echo '|2023-01-28T19:42:39.180123|INFO: Deactivating Volume Group|datavg' +epprd_rg:cl_deactivate_vgs(0.445):datavg[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_vgs(0.445):datavg[vgs_varyoff:284] RC=0 +epprd_rg:cl_deactivate_vgs(0.445):datavg[vgs_varyoff:287] : Update LVM volume group timestamps in ODM +epprd_rg:cl_deactivate_vgs(0.445):datavg[vgs_varyoff:289] cl_update_vg_odm_ts -o datavg +epprd_rg:cl_update_vg_odm_ts(0.001)[77] version=1.13 +epprd_rg:cl_update_vg_odm_ts(0.001)[121] o_flag='' +epprd_rg:cl_update_vg_odm_ts(0.001)[122] f_flag='' +epprd_rg:cl_update_vg_odm_ts(0.001)[123] getopts :of option +epprd_rg:cl_update_vg_odm_ts(0.001)[126] : Local timestamps should be good, since volume group was +epprd_rg:cl_update_vg_odm_ts(0.001)[127] : just varied on or off +epprd_rg:cl_update_vg_odm_ts(0.001)[128] o_flag=TRUE +epprd_rg:cl_update_vg_odm_ts(0.001)[123] getopts :of option +epprd_rg:cl_update_vg_odm_ts(0.001)[142] shift 1 +epprd_rg:cl_update_vg_odm_ts(0.001)[144] vg_name=datavg +epprd_rg:cl_update_vg_odm_ts(0.001)[145] [[ -z datavg ]] +epprd_rg:cl_update_vg_odm_ts(0.001)[151] shift +epprd_rg:cl_update_vg_odm_ts(0.001)[152] node_list='' +epprd_rg:cl_update_vg_odm_ts(0.001)[153] /usr/es/sbin/cluster/utilities/cl_get_path all +epprd_rg:cl_update_vg_odm_ts(0.004)[153] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin +epprd_rg:cl_update_vg_odm_ts(0.004)[155] [[ -z '' ]] +epprd_rg:cl_update_vg_odm_ts(0.004)[158] : Check to see if this update is necessary - some LVM levels automatically +epprd_rg:cl_update_vg_odm_ts(0.004)[159] : update volume group timestamps clusterwide.
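
cl_update_vg_odm_ts only pushes timestamps when the installed LVM predates the levels that sync them clusterwide on their own; the version test zero-pads each VRMF field to a fixed width so the concatenation compares as a single integer. A sketch of that check wrapped in a hypothetical helper (the function name is invented; the fix-level constants are copied from the trace):

    function vg_ts_update_needed
    {
        typeset -li V R M F
        typeset -Z2 V R             # zero-fill to width 2: 7 -> 07
        typeset -Z3 M F             # zero-fill to width 3: 5 -> 005
        typeset -li VRMF=0

        lslpp -lcqOr bos.rte.lvm | cut -f3 -d: | IFS=. read V R M F
        VRMF=$V$R$M$F               # 7.2.5.101 -> 0702005101

        (( V == 6 && VRMF >= 601008015 )) && return 1   # AIX 6 LVM syncs stamps itself
        (( VRMF >= 701003046 )) && return 1             # so does AIX 7 at this level
        return 0                    # older LVM: caller must push the ODM timestamps
    }
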
+epprd_rg:cl_update_vg_odm_ts(0.004)[163] instfix -iqk IV74100 +epprd_rg:cl_update_vg_odm_ts(0.005)[163] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.012)[164] instfix -iqk IV74883 +epprd_rg:cl_update_vg_odm_ts(0.013)[164] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.020)[165] instfix -iqk IV74698 +epprd_rg:cl_update_vg_odm_ts(0.021)[165] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.028)[166] instfix -iqk IV74246 +epprd_rg:cl_update_vg_odm_ts(0.029)[166] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.036)[174] emgr -l -L IV74883 +epprd_rg:cl_update_vg_odm_ts(0.037)[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.291)[174] emgr -l -L IV74698 +epprd_rg:cl_update_vg_odm_ts(0.292)[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.547)[174] emgr -l -L IV74246 +epprd_rg:cl_update_vg_odm_ts(0.548)[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.803)[183] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_update_vg_odm_ts(0.803)[184] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_update_vg_odm_ts(0.803)[185] : 99.99.999.999 +epprd_rg:cl_update_vg_odm_ts(0.803)[187] typeset -li V R M F +epprd_rg:cl_update_vg_odm_ts(0.803)[188] typeset -Z2 V +epprd_rg:cl_update_vg_odm_ts(0.803)[189] typeset -Z2 R +epprd_rg:cl_update_vg_odm_ts(0.803)[190] typeset -Z3 M +epprd_rg:cl_update_vg_odm_ts(0.803)[191] typeset -Z3 F +epprd_rg:cl_update_vg_odm_ts(0.803)[192] lvm_lvl6=601008015 +epprd_rg:cl_update_vg_odm_ts(0.803)[192] typeset -li lvm_lvl6 +epprd_rg:cl_update_vg_odm_ts(0.803)[194] lvm_lvl7=701003046 +epprd_rg:cl_update_vg_odm_ts(0.803)[194] typeset -li lvm_lvl7 +epprd_rg:cl_update_vg_odm_ts(0.803)[195] VRMF=0 +epprd_rg:cl_update_vg_odm_ts(0.803)[195] typeset -li VRMF +epprd_rg:cl_update_vg_odm_ts(0.803)[198] : Here try and figure out what level of LVM is installed +epprd_rg:cl_update_vg_odm_ts(0.804)[200] lslpp -lcqOr bos.rte.lvm +epprd_rg:cl_update_vg_odm_ts(0.805)[200] cut -f3 -d: +epprd_rg:cl_update_vg_odm_ts(0.806)[200] read V R M F +epprd_rg:cl_update_vg_odm_ts(0.806)[200] IFS=. +epprd_rg:cl_update_vg_odm_ts(0.806)[201] VRMF=0702005101 +epprd_rg:cl_update_vg_odm_ts(0.806)[203] (( 7 == 6 && 702005101 >= 601008015 )) +epprd_rg:cl_update_vg_odm_ts(0.806)[204] (( 702005101 >= 701003046 )) +epprd_rg:cl_update_vg_odm_ts(0.806)[207] : LVM at a level in which timestamp update is unnecessary +epprd_rg:cl_update_vg_odm_ts(0.806)[209] return 0 +epprd_rg:cl_deactivate_vgs(1.255):datavg[vgs_varyoff:291] (( 0 == 0 )) +epprd_rg:cl_deactivate_vgs(1.255):datavg[vgs_varyoff:294] : successful varyoff, set the fence height to read-only +epprd_rg:cl_deactivate_vgs(1.255):datavg[vgs_varyoff:297] cl_set_vg_fence_height -c datavg ro cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=ro(2)) +epprd_rg:cl_deactivate_vgs(1.259):datavg[vgs_varyoff:298] RC=0 +epprd_rg:cl_deactivate_vgs(1.259):datavg[vgs_varyoff:299] (( 0 != 0 )) +epprd_rg:cl_deactivate_vgs(1.259):datavg[vgs_varyoff:403] : Append status to the status file. 
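
vgs_varyoff instances can run in the background, so results travel through a shared status file: each worker appends "<vg> <rc>", and after the wait the parent folds those lines into one event status, exactly as the read of /tmp/_deactivate_vgs.tmp below shows. A condensed sketch of both halves (ksh runs the read loop, the last pipeline stage, in the current shell, so STATUS persists):

    TMP_VARYOFF_STATUS=/tmp/_deactivate_vgs.tmp

    # worker side, at the end of vgs_varyoff:
    echo $VG $RC >> $TMP_VARYOFF_STATUS

    # parent side, after all background workers finish:
    wait
    STATUS=0
    cat $TMP_VARYOFF_STATUS | while read VGNAME VARYOFF_STATUS
    do
        [[ $VARYOFF_STATUS == 1 ]] && STATUS=1    # any failed varyoff fails the event
    done
    rm -f $TMP_VARYOFF_STATUS
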
+epprd_rg:cl_deactivate_vgs(1.259):datavg[vgs_varyoff:407] echo datavg 0 +epprd_rg:cl_deactivate_vgs(1.259):datavg[vgs_varyoff:407] 1>> /tmp/_deactivate_vgs.tmp +epprd_rg:cl_deactivate_vgs(1.259):datavg[vgs_varyoff:408] return 0 +epprd_rg:cl_deactivate_vgs(1.259):datavg[731] unset PS4_LOOP +epprd_rg:cl_deactivate_vgs(1.260)[736] : Wait for the background instances of vgs_varyoff +epprd_rg:cl_deactivate_vgs(1.260)[738] wait +epprd_rg:cl_deactivate_vgs(1.260)[741] : Collect any failure indications from backgrounded varyoff processing +epprd_rg:cl_deactivate_vgs(1.260)[743] [[ -f /tmp/_deactivate_vgs.tmp ]] +epprd_rg:cl_deactivate_vgs(1.261)[748] cat /tmp/_deactivate_vgs.tmp +epprd_rg:cl_deactivate_vgs(1.261)[748] read VGNAME VARYOFF_STATUS +epprd_rg:cl_deactivate_vgs(1.262)[750] [[ 0 == 1 ]] +epprd_rg:cl_deactivate_vgs(1.262)[748] read VGNAME VARYOFF_STATUS +epprd_rg:cl_deactivate_vgs(1.262)[765] rm -f /tmp/_deactivate_vgs.tmp +epprd_rg:cl_deactivate_vgs(1.265)[769] : Update Resource Manager - release success for the non-error VGs +epprd_rg:cl_deactivate_vgs(1.265)[771] ALLNOERRVGS=All_nonerror_volume_groups +epprd_rg:cl_deactivate_vgs(1.265)[772] [[ true == false ]] +epprd_rg:cl_deactivate_vgs(1.265)[778] cl_RMupdate resource_down All_nonerror_volume_groups cl_deactivate_vgs 2023-01-28T19:42:40.024113 2023-01-28T19:42:40.028663 +epprd_rg:cl_deactivate_vgs(1.293)[782] [[ FALSE == TRUE ]] +epprd_rg:cl_deactivate_vgs(1.294)[791] exit 0 +epprd_rg:process_resources[process_volume_groups:2606] RC=0 +epprd_rg:process_resources[process_volume_groups:2607] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[process_volume_groups:2620] (( 0 != 0 )) +epprd_rg:process_resources[process_volume_groups:2627] return 0 +epprd_rg:process_resources[process_volume_groups_main:2556] STAT=0 +epprd_rg:process_resources[process_volume_groups_main:2559] return 0 +epprd_rg:process_resources[3572] RC=0 +epprd_rg:process_resources[3573] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3575] [[ 0 != 0 ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:42:40.042341 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=SERVICE_LABELS ACTION=RELEASE IP_LABELS='"epprd"' RESOURCE_GROUPS='"epprd_rg' '"' COMMUNICATION_LINKS='""' +epprd_rg:process_resources[1] JOB_TYPE=SERVICE_LABELS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] IP_LABELS=epprd +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] COMMUNICATION_LINKS='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ SERVICE_LABELS == RELEASE ]] +epprd_rg:process_resources[3360] [[ SERVICE_LABELS == ONLINE ]] +epprd_rg:process_resources[3407] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[3411] release_service_labels 
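
The release_service_labels job that starts here lands in release_service_addr, which must first map the label epprd to an address and confirm this node actually carries it; field 7 of cllsif's '~'-separated output is the dotted address. A sketch of that lookup, with the not-configured branch assumed:

    SERVICELABEL=epprd
    textual_addr=$(cllsif -J '~' -Sn $SERVICELABEL | cut -d~ -f7 | uniq)
    INTERFACE=$(LC_ALL=C clgetif -a $textual_addr)      # "en0 " if aliased here
    if [[ -z $INTERFACE ]]
    then
        : address not configured on this node, nothing to release (assumed branch)
    else
        NETMASK=$(LC_ALL=C clgetif -n $textual_addr)    # "255.255.255.0 "
    fi
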
+epprd_rg:process_resources[release_service_labels:3125] PS4_FUNC=release_service_labels +epprd_rg:process_resources[release_service_labels:3125] typeset PS4_FUNC +epprd_rg:process_resources[release_service_labels:3126] [[ high == high ]] +epprd_rg:process_resources[release_service_labels:3126] set -x +epprd_rg:process_resources[release_service_labels:3127] STAT=0 +epprd_rg:process_resources[release_service_labels:3128] clcallev release_service_addr Jan 28 2023 19:42:40 EVENT START: release_service_addr |2023-01-28T19:42:40|8592|EVENT START: release_service_addr | +epprd_rg:release_service_addr[87] version=1.44 +epprd_rg:release_service_addr[90] STATUS=0 +epprd_rg:release_service_addr[91] PROC_RES=false +epprd_rg:release_service_addr[95] [[ SERVICE_LABELS != 0 ]] +epprd_rg:release_service_addr[95] [[ SERVICE_LABELS != GROUP ]] +epprd_rg:release_service_addr[96] PROC_RES=true +epprd_rg:release_service_addr[97] _IP_LABELS=epprd +epprd_rg:release_service_addr[109] saveNSORDER=UNDEFINED +epprd_rg:release_service_addr[110] NSORDER=local +epprd_rg:release_service_addr[110] export NSORDER +epprd_rg:release_service_addr[117] export GROUPNAME +epprd_rg:release_service_addr[119] [[ true == true ]] +epprd_rg:release_service_addr[120] get_list_head epprd +epprd_rg:release_service_addr[120] read SERVICELABELS +epprd_rg:release_service_addr[121] get_list_tail epprd +epprd_rg:release_service_addr[121] read IP_LABELS +epprd_rg:release_service_addr[127] cl_RMupdate resource_releasing All_service_addrs release_service_addr 2023-01-28T19:42:40.128732 2023-01-28T19:42:40.133333 +epprd_rg:release_service_addr[136] clgetif -a epprd +epprd_rg:release_service_addr[136] LC_ALL=C en0 +epprd_rg:release_service_addr[137] return_code=0 +epprd_rg:release_service_addr[137] typeset -li return_code +epprd_rg:release_service_addr[138] (( 0 )) +epprd_rg:release_service_addr[159] cllsif -J '~' -Sn epprd +epprd_rg:release_service_addr[159] cut -d~ -f7 +epprd_rg:release_service_addr[159] uniq +epprd_rg:release_service_addr[159] textual_addr=61.81.244.156 +epprd_rg:release_service_addr[160] clgetif -a 61.81.244.156 +epprd_rg:release_service_addr[160] LC_ALL=C +epprd_rg:release_service_addr[160] INTERFACE='en0 ' +epprd_rg:release_service_addr[161] [[ -z 'en0 ' ]] +epprd_rg:release_service_addr[182] clgetif -n 61.81.244.156 +epprd_rg:release_service_addr[182] LC_ALL=C +epprd_rg:release_service_addr[182] NETMASK='255.255.255.0 ' +epprd_rg:release_service_addr[183] cllsif -J '~' +epprd_rg:release_service_addr[183] grep -wF 61.81.244.156 +epprd_rg:release_service_addr[184] cut -d~ -f3 +epprd_rg:release_service_addr[184] sort -u +epprd_rg:release_service_addr[183] NETWORK=net_ether_01 +epprd_rg:release_service_addr[189] cllsif -J '~' -Si epprda +epprd_rg:release_service_addr[189] grep '~boot~' +epprd_rg:release_service_addr[190] cut -d~ -f3,7 +epprd_rg:release_service_addr[190] grep ^net_ether_01~ +epprd_rg:release_service_addr[191] cut -d~ -f2 +epprd_rg:release_service_addr[191] tail -1 +epprd_rg:release_service_addr[189] BOOT=61.81.244.134 +epprd_rg:release_service_addr[193] [[ -z 61.81.244.134 ]] +epprd_rg:release_service_addr[214] [[ -n 'en0 ' ]] +epprd_rg:release_service_addr[216] cut -f15 -d~ +epprd_rg:release_service_addr[216] cllsif -J '~' -Sn 61.81.244.156 +epprd_rg:release_service_addr[216] [[ AF_INET == AF_INET6 ]] +epprd_rg:release_service_addr[221] cl_swap_IP_address rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[462] version=1.9.14.8 +epprd_rg:cl_swap_IP_address[464] 
cl_get_path -S +epprd_rg:cl_swap_IP_address[464] OP_SEP='~' +epprd_rg:cl_swap_IP_address[465] LC_ALL=C +epprd_rg:cl_swap_IP_address[465] export LC_ALL +epprd_rg:cl_swap_IP_address[466] RESTORE_ROUTES=/usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[468] cl_echo 33 'Starting execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0' /usr/es/sbin/cluster/events/utils/cl_swap_IP_address 'rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0' Jan 28 2023 19:42:40Starting execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0+epprd_rg:cl_swap_IP_address[470] typeset -i oslevel +epprd_rg:cl_swap_IP_address[471] /usr/bin/sed s/-//g +epprd_rg:cl_swap_IP_address[471] /usr/bin/oslevel -r +epprd_rg:cl_swap_IP_address[471] oslevel=720005 +epprd_rg:cl_swap_IP_address[476] [[ 6 == 6 ]] +epprd_rg:cl_swap_IP_address[477] [[ 6 == 7 ]] +epprd_rg:cl_swap_IP_address[484] no -a +epprd_rg:cl_swap_IP_address[484] grep ipignoreredirects +epprd_rg:cl_swap_IP_address[484] awk '{ print $3 }' +epprd_rg:cl_swap_IP_address[484] PRIOR_IPIGNORE_REDIRECTS_VALUE=0 +epprd_rg:cl_swap_IP_address[485] /usr/sbin/no -o ipignoreredirects=1 Setting ipignoreredirects to 1 +epprd_rg:cl_swap_IP_address[490] PROC_RES=false +epprd_rg:cl_swap_IP_address[491] [[ SERVICE_LABELS != 0 ]] +epprd_rg:cl_swap_IP_address[491] [[ SERVICE_LABELS != GROUP ]] +epprd_rg:cl_swap_IP_address[492] PROC_RES=true +epprd_rg:cl_swap_IP_address[495] set -u +epprd_rg:cl_swap_IP_address[497] RC=0 +epprd_rg:cl_swap_IP_address[504] netstat -in Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll en0 1500 link#2 fa.e6.13.4e.a9.20 184316975 0 61161150 0 0 en0 1500 61.81.244 61.81.244.156 184316975 0 61161150 0 0 en0 1500 61.81.244 61.81.244.134 184316975 0 61161150 0 0 lo0 16896 link#1 35610082 0 35610082 0 0 lo0 16896 127 127.0.0.1 35610082 0 35610082 0 0 lo0 16896 ::1%1 35610082 0 35610082 0 0 +epprd_rg:cl_swap_IP_address[505] netstat -rnC Routing tables Destination Gateway Flags Wt Policy If Cost Config_Cost Route tree for Protocol Family 2 (Internet): default 61.81.244.1 UG 1 - en0 0 0 61.81.244.0 61.81.244.156 UHSb 1 - en0 0 0 => 61.81.244/24 61.81.244.156 U 1 - en0 0 0 61.81.244.134 127.0.0.1 UGHS 1 - lo0 0 0 61.81.244.156 127.0.0.1 UGHS 1 - lo0 0 0 61.81.244.255 61.81.244.156 UHSb 1 - en0 0 0 127/8 127.0.0.1 U 1 - lo0 0 0 Route tree for Protocol Family 24 (Internet v6): ::1%1 ::1%1 UH 1 - lo0 0 0 +epprd_rg:cl_swap_IP_address[506] CASC_OR_ROT=rotating +epprd_rg:cl_swap_IP_address[507] ACQ_OR_RLSE=release +epprd_rg:cl_swap_IP_address[508] IF=en0 +epprd_rg:cl_swap_IP_address[509] ADDR=61.81.244.134 +epprd_rg:cl_swap_IP_address[510] OLD_ADDR=61.81.244.156 +epprd_rg:cl_swap_IP_address[511] NETMASK=255.255.255.0 +epprd_rg:cl_swap_IP_address[514] [[ rotating == cascading ]] +epprd_rg:cl_swap_IP_address[525] cut -f3 -d~ +epprd_rg:cl_swap_IP_address[525] cllsif -J '~' -Sw -n 61.81.244.134 +epprd_rg:cl_swap_IP_address[525] NET=net_ether_01 +epprd_rg:cl_swap_IP_address[528] clodmget -qidentifier=61.81.244.134 -f max_aliases -n HACMPadapter +epprd_rg:cl_swap_IP_address[528] ALIAS_FIRST=0 +epprd_rg:cl_swap_IP_address[529] grep -c -w inet +epprd_rg:cl_swap_IP_address[529] ifconfig en0 +epprd_rg:cl_swap_IP_address[529] LC_ALL=C +epprd_rg:cl_swap_IP_address[529] NUM_ADDRS=2 +epprd_rg:cl_swap_IP_address[530] [[ release == acquire ]] +epprd_rg:cl_swap_IP_address[598] cl_echo 
7320 'cl_swap_IP_address: Removing aliased IP address 61.81.244.156 from adapter en0' cl_swap_IP_address 61.81.244.156 en0 Jan 28 2023 19:42:40cl_swap_IP_address: Removing aliased IP address 61.81.244.156 from adapter en0+epprd_rg:cl_swap_IP_address[600] amlog_trace '' 'Deliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_swap_IP_address[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_swap_IP_address[amlog_trace:319] cltime +epprd_rg:cl_swap_IP_address[amlog_trace:319] DATE=2023-01-28T19:42:40.379288 +epprd_rg:cl_swap_IP_address[amlog_trace:320] echo '|2023-01-28T19:42:40.379288|INFO: Deliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_swap_IP_address[601] PERSISTENT='' +epprd_rg:cl_swap_IP_address[602] ADDR1=61.81.244.156 +epprd_rg:cl_swap_IP_address[603] disable_pmtu_gated Setting tcp_pmtu_discover to 0 Setting udp_pmtu_discover to 0 +epprd_rg:cl_swap_IP_address[604] alias_replace_routes /usr/es/sbin/cluster/.restore_routes en0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:168] RR=/usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[alias_replace_routes:169] shift +epprd_rg:cl_swap_IP_address[alias_replace_routes:170] interfaces=en0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:171] RC=0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:173] cp /dev/null /usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[alias_replace_routes:175] cat +epprd_rg:cl_swap_IP_address[alias_replace_routes:175] 1> /usr/es/sbin/cluster/.restore_routes 0<< \EOF +epprd_rg:cl_swap_IP_address[alias_replace_routes:175] date #!/bin/ksh # # Script created by cl_swap_IP_address on Sat Jan 28 19:42:40 KORST 2023 # PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin PS4='${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] ' export VERBOSE_LOGGING=${VERBOSE_LOGGING:-"high"} [[ "$VERBOSE_LOGGING" = "high" ]] && set -x : Starting $0 at $(date) # EOF +epprd_rg:cl_swap_IP_address[alias_replace_routes:189] awk '$3 !~ "[Ll]ink" && $3 !~ ":" && $3 !~ "Network" {print $4}' +epprd_rg:cl_swap_IP_address[alias_replace_routes:189] netstat -in +epprd_rg:cl_swap_IP_address[alias_replace_routes:189] LOCADDRS=$'61.81.244.156\n61.81.244.134\n127.0.0.1' +epprd_rg:cl_swap_IP_address[alias_replace_routes:191] netstat -rnC Routing tables Destination Gateway Flags Wt Policy If Cost Config_Cost Route tree for Protocol Family 2 (Internet): default 61.81.244.1 UG 1 - en0 0 0 61.81.244.0 61.81.244.156 UHSb 1 - en0 0 0 => 61.81.244/24 61.81.244.156 U 1 - en0 0 0 61.81.244.134 127.0.0.1 UGHS 1 - lo0 0 0 61.81.244.156 127.0.0.1 UGHS 1 - lo0 0 0 61.81.244.255 61.81.244.156 UHSb 1 - en0 0 0 127/8 127.0.0.1 U 1 - lo0 0 0 Route tree for Protocol Family 24 (Internet v6): ::1%1 ::1%1 UH 1 - lo0 0 0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:200] I=1 +epprd_rg:cl_swap_IP_address[alias_replace_routes:200] typeset 
-li I +epprd_rg:cl_swap_IP_address[alias_replace_routes:201] NXTSVC='' +epprd_rg:cl_swap_IP_address[alias_replace_routes:203] awk '$3 !~ "[Ll]ink" && $3 !~ ":" && ($1 == "en0" || $1 == "en0*") {print $4}' +epprd_rg:cl_swap_IP_address[alias_replace_routes:203] netstat -in +epprd_rg:cl_swap_IP_address[alias_replace_routes:203] IFADDRS=$'61.81.244.156\n61.81.244.134' +epprd_rg:cl_swap_IP_address[alias_replace_routes:204] cllsif -J '~' -Spi epprda +epprd_rg:cl_swap_IP_address[alias_replace_routes:204] grep '~net_ether_01~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:205] grep -E '~service~|~persistent~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:205] cut -d~ -f7 +epprd_rg:cl_swap_IP_address[alias_replace_routes:205] sort -u +epprd_rg:cl_swap_IP_address[alias_replace_routes:204] SVCADDRS=61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:210] awk '$1 !~ ":" {print $1}' +epprd_rg:cl_swap_IP_address[alias_replace_routes:210] echo 61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:210] SVCADDRS=61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:212] cllsif -J '~' -Spi epprda +epprd_rg:cl_swap_IP_address[alias_replace_routes:212] grep '~net_ether_01~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:213] grep -E '~persistent~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:214] cut -d~ -f7 +epprd_rg:cl_swap_IP_address[alias_replace_routes:212] PERSISTENT_IP='' +epprd_rg:cl_swap_IP_address[alias_replace_routes:215] routeaddr='' +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:223] [[ -z '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:225] routeaddr=61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:227] [[ 61.81.244.156 != 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:234] [[ -n '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.134 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:234] [[ -n '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:243] NXTADDR='' +epprd_rg:cl_swap_IP_address[alias_replace_routes:244] bootaddr='' +epprd_rg:cl_swap_IP_address[alias_replace_routes:245] [[ -z '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:247] cllsif -J '~' -Spi epprda +epprd_rg:cl_swap_IP_address[alias_replace_routes:247] grep '~net_ether_01~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:247] 
grep '~boot~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:247] awk -F~ '$9 == "en0" { print $7; }' +epprd_rg:cl_swap_IP_address[alias_replace_routes:247] bootaddr=61.81.244.134 +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] [[ 61.81.244.156 == 61.81.244.134 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] [[ 61.81.244.134 == 61.81.244.134 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] clgetnet 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] clgetnet 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] clgetnet 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] [[ 61.81.244.134 != 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:252] NXTADDR=61.81.244.134 +epprd_rg:cl_swap_IP_address[alias_replace_routes:253] break +epprd_rg:cl_swap_IP_address[alias_replace_routes:258] swaproute=0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:259] NETSTAT_FLAGS='-nrf inet' +epprd_rg:cl_swap_IP_address[alias_replace_routes:261] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:264] swaproute=1 +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] netstat -nrf inet +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] fgrep -w en0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] read DEST GW FLAGS OTHER +epprd_rg:cl_swap_IP_address[alias_replace_routes:268] LOOPBACK=127.0.0.1 +epprd_rg:cl_swap_IP_address[alias_replace_routes:336] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:336] clgetnet 61.81.244.1 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:336] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:338] [[ 0 == 0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:341] [[ -z release ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:341] [[ 61.81.244.156 == ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:346] print 'cl_route_change default 127.0.0.1 61.81.244.1 inet' +epprd_rg:cl_swap_IP_address[alias_replace_routes:346] 1>> /usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[alias_replace_routes:347] add_rc_check /usr/es/sbin/cluster/.restore_routes cl_route_change +epprd_rg:cl_swap_IP_address[add_rc_check:70] RR=/usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[add_rc_check:71] FUNC=cl_route_change +epprd_rg:cl_swap_IP_address[add_rc_check:73] cat +epprd_rg:cl_swap_IP_address[add_rc_check:73] 1>> /usr/es/sbin/cluster/.restore_routes 0<< \EOF rc=$? 
if [[ $rc != 0 ]] then echo "ERROR: cl_route_change failed with code $rc" cl_route_change_RC=$rc fi EOF +epprd_rg:cl_swap_IP_address[alias_replace_routes:350] cl_route_change default 61.81.244.1 127.0.0.1 inet +epprd_rg:cl_swap_IP_address[alias_replace_routes:351] RC=0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:352] : cl_route_change completed with 0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:353] I=I+1 +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] read DEST GW FLAGS OTHER +epprd_rg:cl_swap_IP_address[alias_replace_routes:268] LOOPBACK=127.0.0.2 +epprd_rg:cl_swap_IP_address[alias_replace_routes:290] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:292] [[ '' != '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:292] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:294] route delete -host 61.81.244.0 61.81.244.156 61.81.244.156 host 61.81.244.0: gateway 61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] read DEST GW FLAGS OTHER +epprd_rg:cl_swap_IP_address[alias_replace_routes:268] LOOPBACK=127.0.0.2 +epprd_rg:cl_swap_IP_address[alias_replace_routes:272] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:272] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:272] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:274] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:276] [[ '' != '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:276] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:279] route delete -net 61.81.244/24 61.81.244.156 61.81.244.156 net 61.81.244: gateway 61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] read DEST GW FLAGS OTHER +epprd_rg:cl_swap_IP_address[alias_replace_routes:268] LOOPBACK=127.0.0.2 +epprd_rg:cl_swap_IP_address[alias_replace_routes:290] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:292] [[ '' != '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:292] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:294] route delete -host 61.81.244.255 61.81.244.156 61.81.244.156 host 61.81.244.255: gateway 61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] read DEST GW FLAGS OTHER +epprd_rg:cl_swap_IP_address[alias_replace_routes:360] echo 'exit $cl_route_change_RC' +epprd_rg:cl_swap_IP_address[alias_replace_routes:360] 1>> /usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[alias_replace_routes:361] chmod +x /usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[alias_replace_routes:362] return 0 +epprd_rg:cl_swap_IP_address[605] RC=0 +epprd_rg:cl_swap_IP_address[606] : alias_replace_routes completed with 0 +epprd_rg:cl_swap_IP_address[609] clifconfig en0 delete 61.81.244.156 +epprd_rg:clifconfig[117] version=1.9 +epprd_rg:clifconfig[121] set -A args en0 delete 61.81.244.156 +epprd_rg:clifconfig[124] interface=en0 +epprd_rg:clifconfig[125] shift +epprd_rg:clifconfig[127] [[ -n delete ]] +epprd_rg:clifconfig[130] delete_val=1 +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n 61.81.244.156 ]] +epprd_rg:clifconfig[147] params=' address=61.81.244.156' +epprd_rg:clifconfig[147] addr=61.81.244.156 +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n '' ]] +epprd_rg:clifconfig[174] [[ -n 1 ]] 
+epprd_rg:clifconfig[174] [[ -n epprd_rg ]] +epprd_rg:clifconfig[175] clwparname epprd_rg +epprd_rg:clwparname[38] version=1.3.1.1 +epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clwparname[44] [[ -z '' ]] +epprd_rg:clwparname[44] exit 0 +epprd_rg:clifconfig[175] WPARNAME='' +epprd_rg:clifconfig[176] (( 0 == 0 )) +epprd_rg:clifconfig[176] [[ -n '' ]] +epprd_rg:clifconfig[218] belongs_to_an_active_wpar 61.81.244.156 +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]] +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1 +epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast +epprd_rg:clifconfig[218] IFS='~' +epprd_rg:clifconfig[219] rc=1 +epprd_rg:clifconfig[221] [[ 1 == 0 ]] +epprd_rg:clifconfig[275] ifconfig en0 delete 61.81.244.156 +epprd_rg:cl_swap_IP_address[611] [[ 1 == 1 ]] +epprd_rg:cl_swap_IP_address[613] [[ -n '' ]] +epprd_rg:cl_swap_IP_address[662] [[ -n 61.81.244.134 ]] +epprd_rg:cl_swap_IP_address[671] (( 720005 <= 710003 )) +epprd_rg:cl_swap_IP_address[675] clifconfig en0 alias 61.81.244.134 netmask 255.255.255.0 +epprd_rg:clifconfig[117] version=1.9 +epprd_rg:clifconfig[121] set -A args en0 alias 61.81.244.134 netmask 255.255.255.0 +epprd_rg:clifconfig[124] interface=en0 +epprd_rg:clifconfig[125] shift +epprd_rg:clifconfig[127] [[ -n alias ]] +epprd_rg:clifconfig[129] alias_val=1 +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n 61.81.244.134 ]] +epprd_rg:clifconfig[147] params=' address=61.81.244.134' +epprd_rg:clifconfig[147] addr=61.81.244.134 +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n netmask ]] +epprd_rg:clifconfig[149] params=' address=61.81.244.134 netmask=255.255.255.0' +epprd_rg:clifconfig[149] shift +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n '' ]] +epprd_rg:clifconfig[174] [[ -n 1 ]] +epprd_rg:clifconfig[174] [[ -n epprd_rg ]] +epprd_rg:clifconfig[175] clwparname epprd_rg +epprd_rg:clwparname[38] version=1.3.1.1 +epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clwparname[44] [[ -z '' ]] +epprd_rg:clwparname[44] exit 0 +epprd_rg:clifconfig[175] WPARNAME='' +epprd_rg:clifconfig[176] (( 0 == 0 )) +epprd_rg:clifconfig[176] [[ -n '' ]] +epprd_rg:clifconfig[218] belongs_to_an_active_wpar 61.81.244.134 +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]] +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1 +epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast +epprd_rg:clifconfig[218] IFS='~' +epprd_rg:clifconfig[219] rc=1 +epprd_rg:clifconfig[221] [[ 1 == 0 ]] +epprd_rg:clifconfig[275] ifconfig en0 alias 61.81.244.134 netmask 255.255.255.0 +epprd_rg:cl_swap_IP_address[679] /usr/es/sbin/cluster/.restore_routes +epprd_rg:.restore_routes[+9] date +epprd_rg:.restore_routes[+9] : Starting /usr/es/sbin/cluster/.restore_routes at Sat Jan 28 19:42:40 KORST 2023 +epprd_rg:.restore_routes[+11] cl_route_change default 127.0.0.1 61.81.244.1 inet +epprd_rg:.restore_routes[+12] rc=0 +epprd_rg:.restore_routes[+13] [[ 0 != 0 ]] +epprd_rg:.restore_routes[+19] exit +epprd_rg:cl_swap_IP_address[680] [[ 0 == 0 ]] +epprd_rg:cl_swap_IP_address[680] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[681] : Completed /usr/es/sbin/cluster/.restore_routes with return code 0 
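Up to this point the trace shows the complete release-side IPAT (IP aliasing) swap: alias_replace_routes wrote a replay script to /usr/es/sbin/cluster/.restore_routes, moved the default route onto the loopback (cl_route_change default 61.81.244.1 127.0.0.1), deleted the service alias 61.81.244.156 from en0, re-aliased the boot address 61.81.244.134, and then executed .restore_routes to point the default route back at gateway 61.81.244.1. A minimal, hypothetical ksh sketch of that save-and-replay pattern follows; the paths and addresses are taken from the log, but this is not the cl_swap_IP_address source:

    #!/bin/ksh
    # Hypothetical sketch of the save-and-replay route swap traced above.
    RR=/tmp/.restore_routes.$$        # illustrative path only

    # 1. Record how to restore the default route before touching it.
    print '#!/bin/ksh'                       >  $RR
    print 'route change default 61.81.244.1' >> $RR   # real gateway back
    chmod +x $RR

    # 2. Park the default route on the loopback so it survives while the
    #    interface addresses change (the trace uses cl_route_change for this).
    route change default 127.0.0.1

    # 3. Swap the aliases on the interface.
    ifconfig en0 delete 61.81.244.156                        # service IP off
    ifconfig en0 alias 61.81.244.134 netmask 255.255.255.0   # boot IP back

    # 4. Replay the saved routes now that en0 has a usable address again.
    $RR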
+epprd_rg:cl_swap_IP_address[682] enable_pmtu_gated Setting tcp_pmtu_discover to 1 Setting udp_pmtu_discover to 1 +epprd_rg:cl_swap_IP_address[685] hats_adapter_notify en0 -d 61.81.244.156 alias 2023-01-28T19:42:40.619913 hats_adapter_notify 2023-01-28T19:42:40.621087 hats_adapter_notify +epprd_rg:cl_swap_IP_address[688] check_alias_status en0 61.81.244.156 release +epprd_rg:cl_swap_IP_address[check_alias_status:108] CH_INTERFACE=en0 +epprd_rg:cl_swap_IP_address[check_alias_status:109] CH_ADDRESS=61.81.244.156 +epprd_rg:cl_swap_IP_address[check_alias_status:110] CH_ACQ_OR_RLSE=release +epprd_rg:cl_swap_IP_address[check_alias_status:118] IF_IB=en0 +epprd_rg:cl_swap_IP_address[check_alias_status:120] awk '{print index($0, "ib")}' +epprd_rg:cl_swap_IP_address[check_alias_status:120] echo en0 +epprd_rg:cl_swap_IP_address[check_alias_status:120] IS_IB=0 +epprd_rg:cl_swap_IP_address[check_alias_status:122] [[ 0 != 1 ]] +epprd_rg:cl_swap_IP_address[check_alias_status:124] clifconfig en0 +epprd_rg:cl_swap_IP_address[check_alias_status:124] fgrep -w 61.81.244.156 +epprd_rg:cl_swap_IP_address[check_alias_status:124] awk '{print $2}' +epprd_rg:clifconfig[117] version=1.9 +epprd_rg:clifconfig[121] set -A args en0 +epprd_rg:clifconfig[124] interface=en0 +epprd_rg:clifconfig[125] shift +epprd_rg:clifconfig[127] [[ -n '' ]] +epprd_rg:clifconfig[174] [[ -n '' ]] +epprd_rg:clifconfig[218] belongs_to_an_active_wpar +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]] +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1 +epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast +epprd_rg:clifconfig[218] IFS='~' +epprd_rg:clifconfig[219] rc=1 +epprd_rg:clifconfig[221] [[ 1 == 0 ]] +epprd_rg:clifconfig[275] ifconfig en0 +epprd_rg:cl_swap_IP_address[check_alias_status:124] ADDR='' +epprd_rg:cl_swap_IP_address[check_alias_status:129] [ release = acquire ] +epprd_rg:cl_swap_IP_address[check_alias_status:139] [[ '' == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[check_alias_status:144] return 0 +epprd_rg:cl_swap_IP_address[689] RC1=0 +epprd_rg:cl_swap_IP_address[690] [[ 0 == 0 ]] +epprd_rg:cl_swap_IP_address[690] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[693] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[697] amlog_trace '' 'Deliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_swap_IP_address[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_swap_IP_address[amlog_trace:319] cltime +epprd_rg:cl_swap_IP_address[amlog_trace:319] DATE=2023-01-28T19:42:40.675528 +epprd_rg:cl_swap_IP_address[amlog_trace:320] echo '|2023-01-28T19:42:40.675528|INFO: Deliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_swap_IP_address[701] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[714] flush_arp +epprd_rg:cl_swap_IP_address[flush_arp:49] arp -an +epprd_rg:cl_swap_IP_address[flush_arp:49] grep '\?' 
+epprd_rg:cl_swap_IP_address[flush_arp:49] tr -d '()' +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.27 61.81.244.27 (61.81.244.27) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.218 61.81.244.218 (61.81.244.218) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.220 61.81.244.220 (61.81.244.220) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.221 61.81.244.221 (61.81.244.221) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.224 61.81.244.224 (61.81.244.224) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.239 61.81.244.239 (61.81.244.239) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.251 61.81.244.251 (61.81.244.251) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.252 61.81.244.252 (61.81.244.252) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.123 61.81.244.123 (61.81.244.123) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.126 61.81.244.126 (61.81.244.126) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.132 61.81.244.132 (61.81.244.132) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.145 61.81.244.145 (61.81.244.145) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.146 61.81.244.146 (61.81.244.146) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.1 61.81.244.1 (61.81.244.1) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.154 61.81.244.154 (61.81.244.154) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:52] return 0 +epprd_rg:cl_swap_IP_address[716] netstat -in Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll en0 1500 link#2 fa.e6.13.4e.a9.20 184317120 0 61161295 0 0 en0 1500 61.81.244 61.81.244.134 184317120 0 61161295 0 0 lo0 16896 link#1 35610087 0 35610087 0 0 lo0 16896 127 127.0.0.1 35610087 0 35610087 0 0 lo0 16896 ::1%1 35610087 0 35610087 0 0 +epprd_rg:cl_swap_IP_address[717] netstat -rnC Routing tables Destination Gateway Flags Wt Policy If Cost Config_Cost Route tree for Protocol Family 2 (Internet): default 61.81.244.1 UG 1 - en0 0 0 61.81.244.0 61.81.244.134 UHSb 1 - en0 0 0 => 61.81.244/24 61.81.244.134 U 1 - en0 0 0 61.81.244.134 127.0.0.1 UGHS 1 - lo0 0 0 61.81.244.255 61.81.244.134 UHSb 1 - en0 0 0 127/8 127.0.0.1 U 1 - lo0 0 0 Route tree for Protocol Family 24 (Internet v6): ::1%1 ::1%1 UH 1 - lo0 0 0 +epprd_rg:cl_swap_IP_address[989] no -o ipignoreredirects=0 Setting 
ipignoreredirects to 0 +epprd_rg:cl_swap_IP_address[992] cl_echo 32 'Completed execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0. Exit status = 0' /usr/es/sbin/cluster/events/utils/cl_swap_IP_address 'rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0' 0 Jan 28 2023 19:42:40Completed execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0. Exit status = 0+epprd_rg:cl_swap_IP_address[994] date Sat Jan 28 19:42:40 KORST 2023 +epprd_rg:cl_swap_IP_address[996] exit 0 +epprd_rg:release_service_addr[225] RC=0 +epprd_rg:release_service_addr[227] [[ 0 != 0 ]] +epprd_rg:release_service_addr[245] cl_RMupdate resource_down All_nonerror_service_addrs release_service_addr 2023-01-28T19:42:40.769154 2023-01-28T19:42:40.773644 +epprd_rg:release_service_addr[249] [[ UNDEFINED != UNDEFINED ]] +epprd_rg:release_service_addr[252] NSORDER='' +epprd_rg:release_service_addr[252] export NSORDER +epprd_rg:release_service_addr[255] exit 0 Jan 28 2023 19:42:40 EVENT COMPLETED: release_service_addr 0 |2023-01-28T19:42:40|8592|EVENT COMPLETED: release_service_addr 0| +epprd_rg:process_resources[release_service_labels:3129] RC=0 +epprd_rg:process_resources[release_service_labels:3131] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[release_service_labels:3146] (( 0 != 0 )) +epprd_rg:process_resources[release_service_labels:3152] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. +epprd_rg:process_resources[release_service_labels:3154] return 0 +epprd_rg:process_resources[3412] RC=0 +epprd_rg:process_resources[3413] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:42:41.700169 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=WPAR ACTION=RELEASE RESOURCE_GROUPS='"epprd_rg' '"' +epprd_rg:process_resources[1] JOB_TYPE=WPAR +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ WPAR == RELEASE ]] +epprd_rg:process_resources[3360] [[ WPAR == ONLINE ]] +epprd_rg:process_resources[3492] process_wpars RELEASE +epprd_rg:process_resources[process_wpars:3265] PS4_FUNC=process_wpars +epprd_rg:process_resources[process_wpars:3265] typeset PS4_FUNC +epprd_rg:process_resources[process_wpars:3266] [[ high == high ]] +epprd_rg:process_resources[process_wpars:3266] set -x +epprd_rg:process_resources[process_wpars:3267] STAT=0 +epprd_rg:process_resources[process_wpars:3268] action=RELEASE +epprd_rg:process_resources[process_wpars:3268] typeset action +epprd_rg:process_resources[process_wpars:3272] export 
GROUPNAME +epprd_rg:process_resources[process_wpars:3280] clstop_wpar +epprd_rg:clstop_wpar[42] version=1.7 +epprd_rg:clstop_wpar[46] [[ rg_move == reconfig_resource_release ]] +epprd_rg:clstop_wpar[46] [[ RELEASE_PRIMARY == reconfig_resource_release ]] +epprd_rg:clstop_wpar[55] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clstop_wpar[55] [[ -z '' ]] +epprd_rg:clstop_wpar[55] exit 0 +epprd_rg:process_resources[process_wpars:3281] RC=0 +epprd_rg:process_resources[process_wpars:3285] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[process_wpars:3294] return 0 +epprd_rg:process_resources[3493] RC=0 +epprd_rg:process_resources[3495] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3497] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:42:41.733203 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=OFFLINE RESOURCE_GROUPS='"epprd_rg"' +epprd_rg:process_resources[1] JOB_TYPE=OFFLINE +epprd_rg:process_resources[1] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ OFFLINE == RELEASE ]] +epprd_rg:process_resources[3360] [[ OFFLINE == ONLINE ]] +epprd_rg:process_resources[3681] set_resource_group_state DOWN +epprd_rg:process_resources[set_resource_group_state:82] PS4_FUNC=set_resource_group_state +epprd_rg:process_resources[set_resource_group_state:82] typeset PS4_FUNC +epprd_rg:process_resources[set_resource_group_state:83] [[ high == high ]] +epprd_rg:process_resources[set_resource_group_state:83] set -x +epprd_rg:process_resources[set_resource_group_state:84] STAT=0 +epprd_rg:process_resources[set_resource_group_state:85] new_status=DOWN +epprd_rg:process_resources[set_resource_group_state:89] export GROUPNAME +epprd_rg:process_resources[set_resource_group_state:90] [[ DOWN != DOWN ]] +epprd_rg:process_resources[set_resource_group_state:100] : Resource Manager Updates +epprd_rg:process_resources[set_resource_group_state:122] cl_RMupdate rg_down epprd_rg process_resources 2023-01-28T19:42:41.759203 2023-01-28T19:42:41.763383 +epprd_rg:process_resources[set_resource_group_state:124] amlog_trace '' 'acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:318] clcycle clavailability.log +epprd_rg:process_resources[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:process_resources[amlog_trace:319] cltime +epprd_rg:process_resources[amlog_trace:319] DATE=2023-01-28T19:42:41.793691 +epprd_rg:process_resources[amlog_trace:320] echo '|2023-01-28T19:42:41.793691|INFO: acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:process_resources[set_resource_group_state:153] return 0 +epprd_rg:process_resources[3682] RC=0 +epprd_rg:process_resources[3683] postvg_for_rdisk 
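set_resource_group_state above pairs the Resource Manager update (cl_RMupdate rg_down) with an amlog_trace record; note that this particular record carries the literal 'acquire|epprd_rg|epprda' even though the group is going DOWN, because amlog_trace simply logs whatever string the caller passes. The logging idiom itself, reconstructed from the trace (clcycle and cltime are the PowerHA utilities shown above; treat this as a sketch, not the shipped function):

    # Sketch of the amlog_trace idiom used throughout these events.
    amlog_trace()
    {
        clcycle clavailability.log > /dev/null 2>&1   # cycle the availability log
        DATE=$(cltime)                                # e.g. 2023-01-28T19:42:41.793691
        echo "|$DATE|INFO: $*" >> /var/hacmp/availability/clavailability.log
    }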
+epprd_rg:process_resources[postvg_for_rdisk:856] PS4_FUNC=postvg_for_rdisk +epprd_rg:process_resources[postvg_for_rdisk:856] typeset PS4_FUNC +epprd_rg:process_resources[postvg_for_rdisk:857] [[ high == high ]] +epprd_rg:process_resources[postvg_for_rdisk:857] set -x +epprd_rg:process_resources[postvg_for_rdisk:858] STAT=0 +epprd_rg:process_resources[postvg_for_rdisk:859] FAILURE_IN_METHOD=0 +epprd_rg:process_resources[postvg_for_rdisk:859] typeset -li FAILURE_IN_METHOD +epprd_rg:process_resources[postvg_for_rdisk:860] LIST_OF_FAILED_RGS='' +epprd_rg:process_resources[postvg_for_rdisk:861] RG_LIST=epprd_rg +epprd_rg:process_resources[postvg_for_rdisk:862] RDISK_LIST='' +epprd_rg:process_resources[postvg_for_rdisk:863] DISK_LIST='' +epprd_rg:process_resources[postvg_for_rdisk:866] : Resource groups are processed individually. This is required because +epprd_rg:process_resources[postvg_for_rdisk:867] : the replication mechanism may differ between resource groups. +epprd_rg:process_resources[postvg_for_rdisk:871] getReplicatedResources epprd_rg +epprd_rg:process_resources[getReplicatedResources:699] PS4_FUNC=getReplicatedResources +epprd_rg:process_resources[getReplicatedResources:699] typeset PS4_FUNC +epprd_rg:process_resources[getReplicatedResources:700] [[ high == high ]] +epprd_rg:process_resources[getReplicatedResources:700] set -x +epprd_rg:process_resources[getReplicatedResources:702] RV=false +epprd_rg:process_resources[getReplicatedResources:704] clodmget -n -f type HACMPrresmethods +epprd_rg:process_resources[getReplicatedResources:704] [[ -n 9 ]] +epprd_rg:process_resources[getReplicatedResources:707] : Replicated resource methods are defined, check for resources +epprd_rg:process_resources[getReplicatedResources:709] clodmget -q $'name like \'*_REP_RESOURCE\' AND group=epprd_rg' -f value -n HACMPresource +epprd_rg:process_resources[getReplicatedResources:709] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:718] : Verify if any backup profiles are configured and trigger cbm utilities based on that +epprd_rg:process_resources[getReplicatedResources:720] clodmget -q name=BACKUP_ENABLED -f value HACMPresource +epprd_rg:process_resources[getReplicatedResources:720] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:739] echo false +epprd_rg:process_resources[postvg_for_rdisk:871] REPLICATED_RESOURCES=false +epprd_rg:process_resources[postvg_for_rdisk:873] [[ false == true ]] +epprd_rg:process_resources[postvg_for_rdisk:946] return 0 +epprd_rg:process_resources[3684] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:42:41.817932 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=NONE +epprd_rg:process_resources[1] JOB_TYPE=NONE +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 
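The surrounding control flow is the standard process_resources dispatch loop: call clRGPA, eval the assignments it prints (JOB_TYPE, ACTION, RESOURCE_GROUPS), perform the matching step, and repeat until JOB_TYPE=NONE triggers the break seen just below. A condensed, hypothetical ksh rendering, covering only the job types that appear in this log:

    # Hedged sketch of the process_resources dispatch loop, assuming clRGPA
    # prints assignments such as: JOB_TYPE=OFFLINE RESOURCE_GROUPS="epprd_rg"
    while true
    do
        set -a                 # auto-export everything the eval defines
        eval $(clRGPA)         # ask the recovery program for the next job
        set +a

        case $JOB_TYPE in
            WPAR)    process_wpars $ACTION ;;           # release WPAR resources
            OFFLINE) set_resource_group_state DOWN ;;   # mark the RG offline
            NONE)    break ;;                           # no work left
        esac
    done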
+epprd_rg:process_resources[3360] [[ NONE == RELEASE ]] +epprd_rg:process_resources[3360] [[ NONE == ONLINE ]] +epprd_rg:process_resources[3729] break +epprd_rg:process_resources[3740] : If sddsrv was turned off above, turn it back on again +epprd_rg:process_resources[3742] [[ FALSE == TRUE ]] +epprd_rg:process_resources[3747] exit 0 :rg_move[247] : unsetting AM_SYNC_CALLED_BY from $'callers environment as\n: we dont' require it after this point in execution. :rg_move[250] unset AM_SYNC_CALLED_BY :rg_move[253] [[ -f /tmp/.NFSSTOPPED ]] :rg_move[274] [[ -f /tmp/.RPCLOCKDSTOPPED ]] :rg_move[293] exit 0 Jan 28 2023 19:42:41 EVENT COMPLETED: rg_move epprda 1 RELEASE 0 |2023-01-28T19:42:41|8592|EVENT COMPLETED: rg_move epprda 1 RELEASE 0| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T19:42:41.904357 :clevlog[amlog_trace:320] echo '|2023-01-28T19:42:41.904357|INFO: rg_move|epprd_rg|epprda|1|RELEASE|0' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :rg_move_release[+68] exit 0 Jan 28 2023 19:42:41 EVENT COMPLETED: rg_move_release epprda 1 0 |2023-01-28T19:42:41|8592|EVENT COMPLETED: rg_move_release epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T19:42:42.013638 + echo '|2023-01-28T19:42:42.013638|INFO: rg_move_release|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 19:42:44 EVENT START: rg_move_fence epprda 1 |2023-01-28T19:42:44|8592|EVENT START: rg_move_fence epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T19:42:44.208037 + echo '|2023-01-28T19:42:44.208037|INFO: rg_move_fence|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_fence[62] [[ high == high ]] :rg_move_fence[62] version=1.11 :rg_move_fence[63] NODENAME=epprda :rg_move_fence[63] export NODENAME :rg_move_fence[65] set -u :rg_move_fence[67] [ 2 != 2 ] :rg_move_fence[73] set +u :rg_move_fence[75] [[ -z TRUE ]] :rg_move_fence[80] [[ TRUE == TRUE ]] :rg_move_fence[82] LOCAL_NODENAME=epprda :rg_move_fence[83] odmget -qid=1 HACMPgroup :rg_move_fence[83] egrep 'group =' :rg_move_fence[83] awk '{print $3}' :rg_move_fence[83] eval RGNAME='"epprd_rg"' :rg_move_fence[1] RGNAME=epprd_rg +epprd_rg:rg_move_fence[84] GROUPNAME=epprd_rg +epprd_rg:rg_move_fence[85] group_state='$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[86] set +u +epprd_rg:rg_move_fence[87] eval print '$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[1] print ONLINE +epprd_rg:rg_move_fence[87] RG_MOVE_ONLINE=ONLINE +epprd_rg:rg_move_fence[87] export RG_MOVE_ONLINE +epprd_rg:rg_move_fence[88] set -u +epprd_rg:rg_move_fence[89] RG_MOVE_ONLINE=ONLINE +epprd_rg:rg_move_fence[91] set -a +epprd_rg:rg_move_fence[92] clsetenvgrp epprda rg_move epprd_rg '' :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_fence[92] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" 
SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[93] RC=0 +epprd_rg:rg_move_fence[94] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_fence[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_fence[3] HOMELESS_GROUPS='' +epprd_rg:rg_move_fence[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_fence[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_fence[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_fence[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_fence[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_fence[8] SIBLING_GROUPS='' +epprd_rg:rg_move_fence[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_fence[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_fence[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[95] set +a +epprd_rg:rg_move_fence[96] [ 0 -ne 0 ] +epprd_rg:rg_move_fence[103] process_resources FENCE :rg_move_fence[3318] version=1.169 :rg_move_fence[3321] STATUS=0 :rg_move_fence[3322] sddsrv_off=FALSE :rg_move_fence[3324] true :rg_move_fence[3326] : call rgpa, and it will tell us what to do next :rg_move_fence[3328] set -a :rg_move_fence[3329] clRGPA FENCE :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa FENCE 2023-01-28T19:42:44.313359 clrgpa :clRGPA[+55] exit 0 :rg_move_fence[3329] eval JOB_TYPE=NONE :rg_move_fence[1] JOB_TYPE=NONE :rg_move_fence[3330] RC=0 :rg_move_fence[3331] set +a :rg_move_fence[3333] (( 0 != 0 )) :rg_move_fence[3342] RESOURCE_GROUPS='' :rg_move_fence[3343] GROUPNAME='' :rg_move_fence[3343] export GROUPNAME :rg_move_fence[3353] IS_SERVICE_START=1 :rg_move_fence[3354] IS_SERVICE_STOP=1 :rg_move_fence[3360] [[ NONE == RELEASE ]] :rg_move_fence[3360] [[ NONE == ONLINE ]] :rg_move_fence[3729] break :rg_move_fence[3740] : If sddsrv was turned off above, turn it back on again :rg_move_fence[3742] [[ FALSE == TRUE ]] :rg_move_fence[3747] exit 0 +epprd_rg:rg_move_fence[104] : exit status of process_resources FENCE is: 0 +epprd_rg:rg_move_fence[107] [[ TRUE == TRUE ]] +epprd_rg:rg_move_fence[109] export EVENT_TYPE +epprd_rg:rg_move_fence[110] echo RELEASE_PRIMARY RELEASE_PRIMARY +epprd_rg:rg_move_fence[111] [[ -n '' ]] +epprd_rg:rg_move_fence[141] exit 0 Jan 28 2023 19:42:44 EVENT COMPLETED: rg_move_fence epprda 1 0 |2023-01-28T19:42:44|8592|EVENT COMPLETED: rg_move_fence epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T19:42:44.408760 + echo '|2023-01-28T19:42:44.408760|INFO: rg_move_fence|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log PowerHA SystemMirror Event Summary ---------------------------------------------------------------------------- Serial number for this event: 8592 Event: TE_RG_MOVE_RELEASE Start time: Sat Jan 28 19:41:11 2023 End time: Sat Jan 28 19:42:44 2023 Action: Resource: Script Name: 
----------------------------------------------------------------------------
Releasing resource group: epprd_rg                     process_resources
Search on: Sat.Jan.28.19:41:11.KORST.2023.process_resources.epprd_rg.ref
Releasing resource: All_servers                        stop_server
Search on: Sat.Jan.28.19:41:11.KORST.2023.stop_server.All_servers.epprd_rg.ref
Resource offline: All_nonerror_servers                 stop_server
Search on: Sat.Jan.28.19:42:31.KORST.2023.stop_server.All_nonerror_servers.epprd_rg.ref
Releasing resource: All_exports                        cl_unexport_fs
Search on: Sat.Jan.28.19:42:32.KORST.2023.cl_unexport_fs.All_exports.epprd_rg.ref
Resource offline: All_nonerror_exports                 cl_unexport_fs
Search on: Sat.Jan.28.19:42:32.KORST.2023.cl_unexport_fs.All_nonerror_exports.epprd_rg.ref
Releasing resource: All_filesystems                    cl_deactivate_fs
Search on: Sat.Jan.28.19:42:33.KORST.2023.cl_deactivate_fs.All_filesystems.epprd_rg.ref
Resource offline: All_non_error_filesystems            cl_deactivate_fs
Search on: Sat.Jan.28.19:42:38.KORST.2023.cl_deactivate_fs.All_non_error_filesystems.epprd_rg.ref
Releasing resource: All_volume_groups                  cl_deactivate_vgs
Search on: Sat.Jan.28.19:42:38.KORST.2023.cl_deactivate_vgs.All_volume_groups.epprd_rg.ref
Resource offline: All_nonerror_volume_groups           cl_deactivate_vgs
Search on: Sat.Jan.28.19:42:40.KORST.2023.cl_deactivate_vgs.All_nonerror_volume_groups.epprd_rg.ref
Releasing resource: All_service_addrs                  release_service_addr
Search on: Sat.Jan.28.19:42:40.KORST.2023.release_service_addr.All_service_addrs.epprd_rg.ref
Resource offline: All_nonerror_service_addrs           release_service_addr
Search on: Sat.Jan.28.19:42:40.KORST.2023.release_service_addr.All_nonerror_service_addrs.epprd_rg.ref
Resource group offline: epprd_rg                       process_resources
Search on: Sat.Jan.28.19:42:41.KORST.2023.process_resources.epprd_rg.ref
----------------------------------------------------------------------------
|EVENT_SUMMARY_START|TE_RG_MOVE_RELEASE|2023-01-28T19:41:11|2023-01-28T19:42:44|8592|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:41:11.KORST.2023.process_resources.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:41:11.KORST.2023.stop_server.All_servers.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:42:31.KORST.2023.stop_server.All_nonerror_servers.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:42:32.KORST.2023.cl_unexport_fs.All_exports.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:42:32.KORST.2023.cl_unexport_fs.All_nonerror_exports.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:42:33.KORST.2023.cl_deactivate_fs.All_filesystems.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:42:38.KORST.2023.cl_deactivate_fs.All_non_error_filesystems.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:42:38.KORST.2023.cl_deactivate_vgs.All_volume_groups.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:42:40.KORST.2023.cl_deactivate_vgs.All_nonerror_volume_groups.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:42:40.KORST.2023.release_service_addr.All_service_addrs.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:42:40.KORST.2023.release_service_addr.All_nonerror_service_addrs.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:42:41.KORST.2023.process_resources.epprd_rg.ref.ref|
|EVENT_SUMMARY_END|

PowerHA SystemMirror Event Preamble
----------------------------------------------------------------------------
Serial number for this event: 8597
No resource state change initiated by the cluster manager as a result of this event
----------------------------------------------------------------------------
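Each "Search on:" token in the summary above is a unique key stamped into the full event output; grepping for it jumps straight to that resource's section of the log. For example, assuming the default PowerHA 7.x log location (adjust the path if hacmp.out lives elsewhere):

    # Show the paragraph of output for the service address release step.
    grep -p 'Sat.Jan.28.19:42:40.KORST.2023.release_service_addr.All_service_addrs.epprd_rg.ref' \
        /var/hacmp/log/hacmp.out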
|EVENT_PREAMBLE_START|TE_JOIN_NETWORK|2023-01-28T19:42:46|8597| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Jan 28 2023 19:42:46 EVENT START: network_up epprda net_ether_01 |2023-01-28T19:42:46|8597|EVENT START: network_up epprda net_ether_01| :network_up[+66] version=%I% :network_up[+69] set -a :network_up[+70] cllsparam -n epprda :network_up[+70] eval NODE_NAME=epprda VERBOSE_LOGGING=high PS4='${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] ' DEBUG_LEVEL=Standard LC_ALL='C' :network_up[+70] NODE_NAME=epprda VERBOSE_LOGGING=high PS4=${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] DEBUG_LEVEL=Standard LC_ALL=C :network_up[+71] set +a :network_up[+73] STATUS=0 :network_up[+75] [ 2 -ne 2 ] :network_up[+81] [[ epprda == epprda ]] :network_up[+82] amlog_trace 8597|epprda|net_ether_01 :network_up[+61] clcycle clavailability.log :network_up[+61] 1> /dev/null 2>& 1 :network_up[+61] :network_up[+61] cltime DATE=2023-01-28T19:42:46.657520 :network_up[+61] echo |2023-01-28T19:42:46.657520|INFO: 8597|epprda|net_ether_01 :network_up[+61] 1>> /var/hacmp/availability/clavailability.log :network_up[+84] export NETWORKNAME=net_ether_01 :network_up[+89] [[ epprda == epprda ]] :network_up[+90] amlog_trace 8597|epprda|net_ether_01 :network_up[+61] clcycle clavailability.log :network_up[+61] 1> /dev/null 2>& 1 :network_up[+61] :network_up[+61] cltime DATE=2023-01-28T19:42:46.685200 :network_up[+61] echo |2023-01-28T19:42:46.685200|INFO: 8597|epprda|net_ether_01 :network_up[+61] 1>> /var/hacmp/availability/clavailability.log :network_up[+92] exit 0 Jan 28 2023 19:42:46 EVENT COMPLETED: network_up epprda net_ether_01 0 |2023-01-28T19:42:46|8597|EVENT COMPLETED: network_up epprda net_ether_01 0| Jan 28 2023 19:42:46 EVENT START: network_up_complete epprda net_ether_01 |2023-01-28T19:42:46|8597|EVENT START: network_up_complete epprda net_ether_01| :network_up_complete[+68] version=%I% :network_up_complete[+72] [ 2 -ne 2 ] :network_up_complete[+78] [[ epprda == epprda ]] :network_up_complete[+79] amlog_trace 8597|epprda|net_ether_01 :network_up_complete[+61] clcycle clavailability.log :network_up_complete[+61] 1> /dev/null 2>& 1 :network_up_complete[+61] :network_up_complete[+61] cltime DATE=2023-01-28T19:42:46.966176 :network_up_complete[+61] echo |2023-01-28T19:42:46.966176|INFO: 8597|epprda|net_ether_01 :network_up_complete[+61] 1>> /var/hacmp/availability/clavailability.log :network_up_complete[+82] NODENAME=epprda :network_up_complete[+83] NETWORK=net_ether_01 :network_up_complete[+84] export NETWORKNAME=net_ether_01 :network_up_complete[+86] [[ -z ]] :network_up_complete[+88] EMULATE=REAL :network_up_complete[+90] set -u :network_up_complete[+96] STATUS=0 :network_up_complete[+100] odmget HACMPnode :network_up_complete[+100] grep name = :network_up_complete[+100] sort :network_up_complete[+100] uniq :network_up_complete[+100] wc -l :network_up_complete[+100] [ 2 -eq 2 ] :network_up_complete[+102] :network_up_complete[+102] odmget HACMPgroup :network_up_complete[+102] grep group = :network_up_complete[+102] awk {print $3} :network_up_complete[+102] sed s/"//g RESOURCE_GROUPS=epprd_rg :network_up_complete[+106] :network_up_complete[+106] odmget -q group=epprd_rg AND name=EXPORT_FILESYSTEM HACMPresource :network_up_complete[+106] grep value :network_up_complete[+106] awk 
{print $3} :network_up_complete[+106] sed s/"//g EXPORTLIST=/board_org /sapmnt/EPP :network_up_complete[+107] [ -n /board_org /sapmnt/EPP ] :network_up_complete[+109] [ REAL = EMUL ] :network_up_complete[+114] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+189] TWIN_NAME=epprds :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(1)[+196] grep -qw inoperative :cl_update_statd(1)[+196] rpcinfo -p :cl_update_statd(1)[+196] LC_ALL=C :cl_update_statd(1)[+196] grep -qw status :cl_update_statd(1)[+207] : Get the current twin, if there is one :cl_update_statd(1)[+209] :cl_update_statd(1)[+209] nfso -H sm_gethost :cl_update_statd(1)[+209] 2>& 1 CURTWIN=epprds :cl_update_statd(1)[+210] RC=0 :cl_update_statd(1)[+212] [[ -z true ]] :cl_update_statd(1)[+212] [[ -z epprds ]] :cl_update_statd(1)[+225] : Get the interface to the twin node :cl_update_statd(1)[+227] :cl_update_statd(1)[+227] get_node_ip epprds :cl_update_statd(1)[+9] (( 1 != 1 )) :cl_update_statd(1)[+15] Twin_Name=epprds :cl_update_statd(1)[+16] NewTwin= :cl_update_statd(1)[+19] : Get the Interface details for every interface on the twin node :cl_update_statd(1)[+20] : Reject interfaces on nodes that are not public boot addresses :cl_update_statd(1)[+21] : because those are the only ones we have state information for :cl_update_statd(1)[+23] :cl_update_statd(1)[+23] cllsif -J ~ -Sw -i epprda :cl_update_statd(1)[+23] LC_ALL=C LOCAL_NETWORK_INFO=epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(1)[+24] cllsif -J ~ -Sw -i epprds :cl_update_statd(1)[+24] LC_ALL=C :cl_update_statd(1)[+25] read adapt type network net_type attrib node ip_addr skip interface skip netmask skip skip prefix ip_family :cl_update_statd(1)[+25] IFS=~ :cl_update_statd(1)[+25] [[ public != public ]] :cl_update_statd(1)[+25] [[ boot != boot ]] :cl_update_statd(1)[+33] : Find the state of this candidate :cl_update_statd(1)[+33] [[ AF_INET == AF_INET ]] :cl_update_statd(1)[+37] :cl_update_statd(1)[+37] print 61.81.244.123 :cl_update_statd(1)[+37] tr ./ xx addr=i61x81x244x123_epprds :cl_update_statd(1)[+43] eval candidate_state=${i61x81x244x123_epprds:-down} :cl_update_statd(1)[+43] candidate_state=UP :cl_update_statd(1)[+46] : If state is UP, check to see if this node can talk to it :cl_update_statd(1)[+46] [[ UP == UP ]] :cl_update_statd(1)[+50] ping -w 5 -c 1 -q 61.81.244.123 :cl_update_statd(1)[+50] 1> /dev/null :cl_update_statd(1)[+61] echo epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(1)[+61] tr \n :cl_update_statd(1)[+62] read lcl_adapt lcl_type lcl_network lcl_net_type lcl_attrib lcl_node lcl_ip_addr skip lcl_interface skip lcl_netmask skip skip lcl_prefix lcl_ip_family :cl_update_statd(1)[+62] IFS=~ :cl_update_statd(1)[+62] [[ net_ether_01 != net_ether_01 ]] :cl_update_statd(1)[+62] [[ boot != boot ]] 
:cl_update_statd(1)[+62] [[ public != public ]] :cl_update_statd(1)[+62] [[ AF_INET != AF_INET ]] :cl_update_statd(1)[+62] [[ AF_INET == AF_INET ]] :cl_update_statd(1)[+71] :cl_update_statd(1)[+71] print 61.81.244.134 :cl_update_statd(1)[+71] tr ./ xx addr=i61x81x244x134_epprda :cl_update_statd(1)[+77] eval lcl_candidate_state=${i61x81x244x134_epprda:-down} :cl_update_statd(1)[+77] lcl_candidate_state=UP :cl_update_statd(1)[+77] [[ UP == UP ]] :cl_update_statd(1)[+81] : epprds is on the same network as an interface that is up :cl_update_statd(1)[+82] : on the local node, and the attributes match. :cl_update_statd(1)[+84] NewTwin=epprds :cl_update_statd(1)[+85] break :cl_update_statd(1)[+85] [[ -n epprds ]] :cl_update_statd(1)[+91] break :cl_update_statd(1)[+91] [[ -z epprds ]] :cl_update_statd(1)[+100] echo epprds :cl_update_statd(1)[+101] return 0 NEWTWIN=epprds :cl_update_statd(1)[+227] [[ -z epprds ]] :cl_update_statd(1)[+227] [[ epprds != epprds ]] :cl_update_statd(1)[+259] : RC is actually 0 :cl_update_statd(1)[+266] return 0 :network_up_complete[+115] [ 0 -ne 0 ] :network_up_complete[+120] break :network_up_complete[+125] [[ epprda == epprda ]] :network_up_complete[+131] :network_up_complete[+131] odmget -qname=net_ether_01 HACMPnetwork :network_up_complete[+131] awk $1 == "alias" {print $3} :network_up_complete[+131] sed s/"//g ALIASING=1 :network_up_complete[+131] [[ 1 == 1 ]] :network_up_complete[+133] cl_configure_persistent_address aliasing_network_up -n net_ether_01 :cl_configure_persistent_address[1344] version=1.56.1.4 :cl_configure_persistent_address[1346] cl_get_path -S :cl_configure_persistent_address[1346] OP_SEP='~' :cl_configure_persistent_address[1349] get_local_nodename :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. 
:get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :cl_configure_persistent_address[1349] LOCALNODENAME=epprda :cl_configure_persistent_address[1354] [[ -z epprda ]] :cl_configure_persistent_address[1356] NETWORK='' :cl_configure_persistent_address[1357] ALIVE_IF='' :cl_configure_persistent_address[1358] FAILED_IF='' :cl_configure_persistent_address[1359] FAILED_ADDRESS='' :cl_configure_persistent_address[1360] UPDATE_CLSTRMGR=1 :cl_configure_persistent_address[1361] CHECK_HA_ALIVE=1 :cl_configure_persistent_address[1362] RESTORE_ROUTES=/usr/es/sbin/cluster/.pers_restore_routes :cl_configure_persistent_address[1363] RC=0 :cl_configure_persistent_address[1364] B_FLAG=0 :cl_configure_persistent_address[1366] ACTION=aliasing_network_up :cl_configure_persistent_address[1367] shift :cl_configure_persistent_address[1369] getopt n:a:f:i:dPB -n net_ether_01 :cl_configure_persistent_address[1369] set -- -n net_ether_01 -- :cl_configure_persistent_address[1371] (( 0 != 0 )) :cl_configure_persistent_address[1371] [[ -z aliasing_network_up ]] :cl_configure_persistent_address[1376] [[ -n != -- ]] :cl_configure_persistent_address[1379] NETWORK=net_ether_01 :cl_configure_persistent_address[1380] shift :cl_configure_persistent_address[1380] shift :cl_configure_persistent_address[1376] [[ -- != -- ]] :cl_configure_persistent_address[1418] shift :cl_configure_persistent_address[1422] [[ aliasing_network_up == up ]] :cl_configure_persistent_address[1520] [[ aliasing_network_up == swap ]] :cl_configure_persistent_address[1667] [[ aliasing_network_up == fail_boot ]] :cl_configure_persistent_address[1830] [[ aliasing_network_up == aliasing_network_up ]] :cl_configure_persistent_address[1831] [[ -z net_ether_01 ]] :cl_configure_persistent_address[1837] isAliasingNetwork net_ether_01 :cl_configure_persistent_address[isAliasingNetwork:386] PS4_FUNC=isAliasingNetwork :cl_configure_persistent_address[isAliasingNetwork:386] typeset PS4_FUNC :cl_configure_persistent_address[isAliasingNetwork:387] [[ high == high ]] :cl_configure_persistent_address[isAliasingNetwork:387] set -x :cl_configure_persistent_address[isAliasingNetwork:389] NETWORK=net_ether_01 :cl_configure_persistent_address[isAliasingNetwork:392] awk '$1 == "alias" {print $3}' :cl_configure_persistent_address[isAliasingNetwork:391] odmget -qname=net_ether_01 HACMPnetwork :cl_configure_persistent_address[isAliasingNetwork:393] sed 's/"//g' :cl_configure_persistent_address[isAliasingNetwork:391] print 1 :cl_configure_persistent_address[1837] [[ 1 != 1 ]] :cl_configure_persistent_address[1842] cllsif -J '~' -Spi epprda :cl_configure_persistent_address[1842] awk -F~ '$2 == "persistent" && $3 == "net_ether_01" {print $1}' :cl_configure_persistent_address[1842] PERSISTENT='' :cl_configure_persistent_address[1844] [[ -z '' ]] :cl_configure_persistent_address[1846] exit 0 :network_up_complete[+141] :network_up_complete[+141] cl_rrmethods2call net_initialization :cl_rrmethods2call[56] version=%I% :cl_rrmethods2call[84] RRMETHODS='' :cl_rrmethods2call[85] NEED_RR_ENV_VARS=no :cl_rrmethods2call[90] : The network methods are returned if the Network type is XD_data. 
:cl_rrmethods2call[92] clodmget -qname=net_ether_01 -f nimname -n HACMPnetwork :cl_rrmethods2call[92] RRNET=ether :cl_rrmethods2call[94] [[ ether == XD_data ]] :cl_rrmethods2call[98] return 0 METHODS= :network_up_complete[+163] :network_up_complete[+163] clodmget -n -q name=MOUNT_FILESYSTEM -f group HACMPresource CROSSMOUNTS=epprd_rg :network_up_complete[+165] [ -n epprd_rg -a epprda = epprda ] :network_up_complete[+168] : Remount any NFS cross mount if required :network_up_complete[+174] :network_up_complete[+174] clodmget -n -f group HACMPgroup RESOURCE_GROUPS=epprd_rg :network_up_complete[+185] :network_up_complete[+185] clodmget -n -q name=MOUNT_FILESYSTEM and group=epprd_rg -f value HACMPresource MOUNT_FILESYSTEM=/board;/board_org :network_up_complete[+185] [[ -z /board;/board_org ]] :network_up_complete[+189] IN_RG=false :network_up_complete[+189] clodmget -n -q group=epprd_rg -f nodes HACMPgroup :network_up_complete[+189] [[ epprda == epprda ]] :network_up_complete[+192] IN_RG=true :network_up_complete[+192] [[ epprds == epprda ]] :network_up_complete[+192] [[ true == false ]] :network_up_complete[+197] :network_up_complete[+197] clRGinfo -s epprd_rg :network_up_complete[+197] awk -F : { if ( $2 == "ONLINE" ) print $3 } clRGinfo[431]: version I clRGinfo[517]: Number of resource groups = 1 clRGinfo[562]: cluster epprda_cluster is version = 22 clRGinfo[1439]: IPC target host name is 'localhost' clRGinfo[685]: Current group is 'epprd_rg' get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_for_short_output_table get primary state info for state 4 get secondary state info for state 4 getPreviousStateString: Primary=4, Sec=-1 get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_for_short_output_table get primary state info for state 4 get secondary state info for state 4 getPreviousStateString: Primary=4, Sec=-1 NFS_HOST= :network_up_complete[+197] [[ -z ]] :network_up_complete[+198] continue :network_up_complete[+257] [[ epprda == epprda ]] :network_up_complete[+257] [[ 0 -ne 0 ]] :network_up_complete[+262] amlog_trace 8597|epprda|net_ether_01 :network_up_complete[+61] clcycle clavailability.log :network_up_complete[+61] 1> /dev/null 2>& 1 :network_up_complete[+61] :network_up_complete[+61] cltime DATE=2023-01-28T19:42:47.165682 :network_up_complete[+61] echo |2023-01-28T19:42:47.165682|INFO: 8597|epprda|net_ether_01 :network_up_complete[+61] 1>> /var/hacmp/availability/clavailability.log :network_up_complete[+265] exit 0 Jan 28 2023 19:42:47 EVENT COMPLETED: network_up_complete epprda net_ether_01 0 |2023-01-28T19:42:47|8597|EVENT COMPLETED: network_up_complete epprda net_ether_01 0| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 8596 No resource state change initiated by the cluster manager as a result of this event ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_FAIL_ADAPTER|2023-01-28T19:42:49|8596| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Jan 28 2023 19:42:49 EVENT START: fail_interface epprda 61.81.244.134 |2023-01-28T19:42:49|8596|EVENT START: fail_interface epprda 61.81.244.134| :fail_interface[+64] version=%I% :fail_interface[+66] :fail_interface[+66] cl_get_path -S OP_SEP=~ :fail_interface[+68] [ 2 -ne 2 ] :fail_interface[+74] NODENAME=epprda :fail_interface[+75] 
ADDR=61.81.244.134 :fail_interface[+76] PREFIX_LEN= :fail_interface[+77] ADDR_FAMILY= :fail_interface[+79] set -u :fail_interface[+81] :fail_interface[+81] dspmsg scripts.cat 8062 Interface 61.81.244.134 has failed on node epprda.\n 61.81.244.134 epprda MSG=Interface 61.81.244.134 has failed on node epprda. :fail_interface[+82] echo Interface 61.81.244.134 has failed on node epprda. :fail_interface[+82] 1> /dev/console :fail_interface[+84] [[ epprda = epprda ]] :fail_interface[+88] :fail_interface[+88] cllsif -J ~ -Sn 61.81.244.134 :fail_interface[+88] cut -d~ -f3 NETWORK=net_ether_01 :fail_interface[+91] :fail_interface[+91] odmget -qname=net_ether_01 HACMPnetwork :fail_interface[+91] awk $1 == "alias" {print $3} :fail_interface[+91] sed s/"//g ALIASING=1 :fail_interface[+91] [[ 1 = 1 ]] :fail_interface[+96] set +u :fail_interface[+97] saveNSORDER=UNDEFINED :fail_interface[+98] set -u :fail_interface[+99] NSORDER=local :fail_interface[+99] export NSORDER :fail_interface[+100] netstat -in Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll en0 1500 link#2 fa.e6.13.4e.a9.20 184317677 0 61161851 0 0 en0 1500 61.81.244 61.81.244.134 184317677 0 61161851 0 0 lo0 16896 link#1 35610164 0 35610164 0 0 lo0 16896 127 127.0.0.1 35610164 0 35610164 0 0 lo0 16896 ::1%1 35610164 0 35610164 0 0 :fail_interface[+101] netstat -rnC Routing tables Destination Gateway Flags Wt Policy If Cost Config_Cost Route tree for Protocol Family 2 (Internet): default 61.81.244.1 UG 1 - en0 0 0 61.81.244.0 61.81.244.134 UHSb 1 - en0 0 0 => 61.81.244/24 61.81.244.134 U 1 - en0 0 0 61.81.244.134 127.0.0.1 UGHS 1 - lo0 0 0 61.81.244.255 61.81.244.134 UHSb 1 - en0 0 0 127/8 127.0.0.1 U 1 - lo0 0 0 Route tree for Protocol Family 24 (Internet v6): ::1%1 ::1%1 UH 1 - lo0 0 0 :fail_interface[+102] cl_configure_persistent_address fail_boot -i 61.81.244.134 -n net_ether_01 :cl_configure_persistent_address[1344] version=1.56.1.4 :cl_configure_persistent_address[1346] cl_get_path -S :cl_configure_persistent_address[1346] OP_SEP='~' :cl_configure_persistent_address[1349] get_local_nodename :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. 
:get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :cl_configure_persistent_address[1349] LOCALNODENAME=epprda :cl_configure_persistent_address[1354] [[ -z epprda ]] :cl_configure_persistent_address[1356] NETWORK='' :cl_configure_persistent_address[1357] ALIVE_IF='' :cl_configure_persistent_address[1358] FAILED_IF='' :cl_configure_persistent_address[1359] FAILED_ADDRESS='' :cl_configure_persistent_address[1360] UPDATE_CLSTRMGR=1 :cl_configure_persistent_address[1361] CHECK_HA_ALIVE=1 :cl_configure_persistent_address[1362] RESTORE_ROUTES=/usr/es/sbin/cluster/.pers_restore_routes :cl_configure_persistent_address[1363] RC=0 :cl_configure_persistent_address[1364] B_FLAG=0 :cl_configure_persistent_address[1366] ACTION=fail_boot :cl_configure_persistent_address[1367] shift :cl_configure_persistent_address[1369] getopt n:a:f:i:dPB -i 61.81.244.134 -n net_ether_01 :cl_configure_persistent_address[1369] set -- -i 61.81.244.134 -n net_ether_01 -- :cl_configure_persistent_address[1371] (( 0 != 0 )) :cl_configure_persistent_address[1371] [[ -z fail_boot ]] :cl_configure_persistent_address[1376] [[ -i != -- ]] :cl_configure_persistent_address[1392] FAILED_ADDRESS=61.81.244.134 :cl_configure_persistent_address[1393] shift :cl_configure_persistent_address[1393] shift :cl_configure_persistent_address[1376] [[ -n != -- ]] :cl_configure_persistent_address[1379] NETWORK=net_ether_01 :cl_configure_persistent_address[1380] shift :cl_configure_persistent_address[1380] shift :cl_configure_persistent_address[1376] [[ -- != -- ]] :cl_configure_persistent_address[1418] shift :cl_configure_persistent_address[1422] [[ fail_boot == up ]] :cl_configure_persistent_address[1520] [[ fail_boot == swap ]] :cl_configure_persistent_address[1667] [[ fail_boot == fail_boot ]] :cl_configure_persistent_address[1668] [[ -z 61.81.244.134 ]] :cl_configure_persistent_address[1668] [[ -z net_ether_01 ]] :cl_configure_persistent_address[1672] clgetif -a 61.81.244.134 :cl_configure_persistent_address[1672] 2> /dev/null :cl_configure_persistent_address[1672] awk '{print $1}' :cl_configure_persistent_address[1672] IF=en0 :cl_configure_persistent_address[1673] cllsif -J '~' -Sn 61.81.244.134 :cl_configure_persistent_address[1673] cut -d~ -f3 :cl_configure_persistent_address[1673] NETWORK=net_ether_01 :cl_configure_persistent_address[1677] isAliasingNetwork net_ether_01 :cl_configure_persistent_address[isAliasingNetwork:386] PS4_FUNC=isAliasingNetwork :cl_configure_persistent_address[isAliasingNetwork:386] typeset PS4_FUNC :cl_configure_persistent_address[isAliasingNetwork:387] [[ high == high ]] :cl_configure_persistent_address[isAliasingNetwork:387] set -x :cl_configure_persistent_address[isAliasingNetwork:389] NETWORK=net_ether_01 :cl_configure_persistent_address[isAliasingNetwork:391] odmget -qname=net_ether_01 HACMPnetwork :cl_configure_persistent_address[isAliasingNetwork:392] awk '$1 == "alias" {print $3}' :cl_configure_persistent_address[isAliasingNetwork:393] sed 's/"//g' :cl_configure_persistent_address[isAliasingNetwork:391] print 1 :cl_configure_persistent_address[1677] [[ 1 != 1 ]] :cl_configure_persistent_address[1682] cllsif -J '~' -Spi epprda :cl_configure_persistent_address[1682] awk -F~ '$2 == "persistent" && $3 == "net_ether_01" {print $1}' :cl_configure_persistent_address[1682] PERSISTENT='' :cl_configure_persistent_address[1684] [[ -z '' ]] :cl_configure_persistent_address[1686] exit 0 
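Both invocations of cl_configure_persistent_address in this section (aliasing_network_up earlier, fail_boot here) end the same way: the script queries the topology for a persistent label on net_ether_01, finds none, and exits 0 without touching the interface. The guard reduces to the following pattern, built from the exact commands in the trace (the full script handles many more cases):

    # Exit quietly when no persistent address is configured on the network.
    # cllsif -J '~' -Spi epprda lists epprda's interfaces '~'-separated:
    # field 1 = label, field 2 = type, field 3 = network name.
    PERSISTENT=$(cllsif -J '~' -Spi epprda |
        awk -F'~' '$2 == "persistent" && $3 == "net_ether_01" {print $1}')

    if [[ -z $PERSISTENT ]]
    then
        exit 0    # nothing to manage on this network
    fi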
:fail_interface[+106] :fail_interface[+106] clgetif -n 61.81.244.134 :fail_interface[+106] LC_ALL=C NETMASK=255.255.255.0 :fail_interface[+107] :fail_interface[+107] clgetif -a 61.81.244.134 :fail_interface[+107] LC_ALL=C IF1=en0 :fail_interface[+108] BOOT1=61.81.244.134 :fail_interface[+111] :fail_interface[+111] cllsif -J ~ -Si epprda :fail_interface[+111] awk -F~ -v net=net_ether_01 -v if1=en0 ($2=="boot" && \ $3==net && $9!=if1) {printf("%s\n",$7)} BOOT2= :fail_interface[+111] [[ -n ]] :fail_interface[+111] [[ UNDEFINED != UNDEFINED ]] :fail_interface[+179] export NSORDER= :fail_interface[+184] exit 0 Jan 28 2023 19:42:49 EVENT COMPLETED: fail_interface epprda 61.81.244.134 0 |2023-01-28T19:42:49|8596|EVENT COMPLETED: fail_interface epprda 61.81.244.134 0| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 8599 No resource state change initiated by the cluster manager as a result of this event ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_JOIN_ADAPTER|2023-01-28T19:42:51|8599| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Jan 28 2023 19:42:51 EVENT START: join_interface epprda 61.81.244.134 |2023-01-28T19:42:51|8599|EVENT START: join_interface epprda 61.81.244.134| :join_interface[+64] version=%I% :join_interface[+67] :join_interface[+67] cl_get_path -S OP_SEP=~ :join_interface[+69] [ 2 -ne 2 ] :join_interface[+75] NODENAME=epprda :join_interface[+76] ADDR=61.81.244.134 :join_interface[+77] BOOTIF= :join_interface[+78] ADDR_FAMILY= :join_interface[+79] PREFIX_LEN= :join_interface[+81] set -u :join_interface[+82] [[ epprda = epprda ]] :join_interface[+88] :join_interface[+88] cllsif -J ~ -Si epprda :join_interface[+88] awk -F~ -v bootif=61.81.244.134 ($2=="boot" && $7==bootif) \ {printf("%s\n",$9)} BOOTIF=en0 :join_interface[+88] [[ -n en0 ]] :join_interface[+91] cllsif -J ~ -Sn 61.81.244.134 :join_interface[+91] cut -d~ -f12 :join_interface[+92] tr ~ :join_interface[+92] read IF_ALIAS :join_interface[+92] [[ -n ]] :join_interface[+121] :join_interface[+121] dspmsg scripts.cat 8064 Interface 61.81.244.134 is now available on node epprda.\n 61.81.244.134 epprda MSG=Interface 61.81.244.134 is now available on node epprda. :join_interface[+122] echo Interface 61.81.244.134 is now available on node epprda. 
:join_interface[+122] 1> /dev/console :join_interface[+124] exit 0 Jan 28 2023 19:42:51 EVENT COMPLETED: join_interface epprda 61.81.244.134 0 |2023-01-28T19:42:51|8599|EVENT COMPLETED: join_interface epprda 61.81.244.134 0| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 8598 No resource state change initiated by the cluster manager as a result of this event ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_JOIN_ADAPTER|2023-01-28T19:42:53|8598| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Jan 28 2023 19:42:53 EVENT START: join_interface epprda 61.81.244.156 |2023-01-28T19:42:54|8598|EVENT START: join_interface epprda 61.81.244.156| :join_interface[+64] version=%I% :join_interface[+67] :join_interface[+67] cl_get_path -S OP_SEP=~ :join_interface[+69] [ 2 -ne 2 ] :join_interface[+75] NODENAME=epprda :join_interface[+76] ADDR=61.81.244.156 :join_interface[+77] BOOTIF= :join_interface[+78] ADDR_FAMILY= :join_interface[+79] PREFIX_LEN= :join_interface[+81] set -u :join_interface[+82] [[ epprda = epprda ]] :join_interface[+88] :join_interface[+88] cllsif -J ~ -Si epprda :join_interface[+88] awk -F~ -v bootif=61.81.244.156 ($2=="boot" && $7==bootif) \ {printf("%s\n",$9)} BOOTIF= :join_interface[+88] [[ -n ]] :join_interface[+121] :join_interface[+121] dspmsg scripts.cat 8064 Interface 61.81.244.156 is now available on node epprda.\n 61.81.244.156 epprda MSG=Interface 61.81.244.156 is now available on node epprda. :join_interface[+122] echo Interface 61.81.244.156 is now available on node epprda. :join_interface[+122] 1> /dev/console :join_interface[+124] exit 0 Jan 28 2023 19:42:54 EVENT COMPLETED: join_interface epprda 61.81.244.156 0 |2023-01-28T19:42:54|8598|EVENT COMPLETED: join_interface epprda 61.81.244.156 0| Jan 28 2023 19:42:56 EVENT START: rg_move_fence epprda 1 |2023-01-28T19:42:56|8593|EVENT START: rg_move_fence epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T19:42:56.342562 + echo '|2023-01-28T19:42:56.342562|INFO: rg_move_fence|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_fence[62] [[ high == high ]] :rg_move_fence[62] version=1.11 :rg_move_fence[63] NODENAME=epprda :rg_move_fence[63] export NODENAME :rg_move_fence[65] set -u :rg_move_fence[67] [ 2 != 2 ] :rg_move_fence[73] set +u :rg_move_fence[75] [[ -z TRUE ]] :rg_move_fence[80] [[ TRUE == TRUE ]] :rg_move_fence[82] LOCAL_NODENAME=epprda :rg_move_fence[83] odmget -qid=1 HACMPgroup :rg_move_fence[83] egrep 'group =' :rg_move_fence[83] awk '{print $3}' :rg_move_fence[83] eval RGNAME='"epprd_rg"' :rg_move_fence[1] RGNAME=epprd_rg +epprd_rg:rg_move_fence[84] GROUPNAME=epprd_rg +epprd_rg:rg_move_fence[85] group_state='$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[86] set +u +epprd_rg:rg_move_fence[87] eval print '$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[1] print +epprd_rg:rg_move_fence[87] RG_MOVE_ONLINE='' +epprd_rg:rg_move_fence[87] export RG_MOVE_ONLINE +epprd_rg:rg_move_fence[88] set -u +epprd_rg:rg_move_fence[89] RG_MOVE_ONLINE=TMP_ERROR +epprd_rg:rg_move_fence[91] set -a +epprd_rg:rg_move_fence[92] clsetenvgrp epprda rg_move epprd_rg '' :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp 
clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_fence[92] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[93] RC=0 +epprd_rg:rg_move_fence[94] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_fence[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_fence[3] HOMELESS_GROUPS='' +epprd_rg:rg_move_fence[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_fence[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_fence[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_fence[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_fence[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_fence[8] SIBLING_GROUPS='' +epprd_rg:rg_move_fence[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_fence[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_fence[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[95] set +a +epprd_rg:rg_move_fence[96] [ 0 -ne 0 ] +epprd_rg:rg_move_fence[103] process_resources FENCE :rg_move_fence[3318] version=1.169 :rg_move_fence[3321] STATUS=0 :rg_move_fence[3322] sddsrv_off=FALSE :rg_move_fence[3324] true :rg_move_fence[3326] : call rgpa, and it will tell us what to do next :rg_move_fence[3328] set -a :rg_move_fence[3329] clRGPA FENCE :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa FENCE 2023-01-28T19:42:56.451808 clrgpa :clRGPA[+55] exit 0 :rg_move_fence[3329] eval JOB_TYPE=NONE :rg_move_fence[1] JOB_TYPE=NONE :rg_move_fence[3330] RC=0 :rg_move_fence[3331] set +a :rg_move_fence[3333] (( 0 != 0 )) :rg_move_fence[3342] RESOURCE_GROUPS='' :rg_move_fence[3343] GROUPNAME='' :rg_move_fence[3343] export GROUPNAME :rg_move_fence[3353] IS_SERVICE_START=1 :rg_move_fence[3354] IS_SERVICE_STOP=1 :rg_move_fence[3360] [[ NONE == RELEASE ]] :rg_move_fence[3360] [[ NONE == ONLINE ]] :rg_move_fence[3729] break :rg_move_fence[3740] : If sddsrv was turned off above, turn it back on again :rg_move_fence[3742] [[ FALSE == TRUE ]] :rg_move_fence[3747] exit 0 +epprd_rg:rg_move_fence[104] : exit status of process_resources FENCE is: 0 +epprd_rg:rg_move_fence[107] [[ TRUE == TRUE ]] +epprd_rg:rg_move_fence[109] export EVENT_TYPE +epprd_rg:rg_move_fence[110] echo ACQUIRE_PRIMARY ACQUIRE_PRIMARY +epprd_rg:rg_move_fence[111] [[ -n '' ]] +epprd_rg:rg_move_fence[141] exit 0 Jan 28 2023 19:42:56 EVENT COMPLETED: rg_move_fence epprda 1 0 |2023-01-28T19:42:56|8593|EVENT COMPLETED: rg_move_fence epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T19:42:56.547596 + echo '|2023-01-28T19:42:56.547596|INFO: rg_move_fence|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 19:42:56 
EVENT START: rg_move_acquire epprda 1 |2023-01-28T19:42:56|8593|EVENT START: rg_move_acquire epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T19:42:56.746855 + echo '|2023-01-28T19:42:56.746855|INFO: rg_move_acquire|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_acquire[+54] [[ high == high ]] :rg_move_acquire[+54] version=1.9.1.7 :rg_move_acquire[+57] set -u :rg_move_acquire[+59] [ 2 != 2 ] :rg_move_acquire[+65] set +u :rg_move_acquire[+67] :rg_move_acquire[+67] clodmget -n -q id=1 -f group HACMPgroup RG=epprd_rg :rg_move_acquire[+68] export RG :rg_move_acquire[+70] [[ ACQUIRE_PRIMARY == ACQUIRE_PRIMARY ]] :rg_move_acquire[+75] typeset -i anhp_ret=0 :rg_move_acquire[+76] typeset -i scsi_ret=0 :rg_move_acquire[+78] clodmget -n -q policy = anhp -f value HACMPsplitmerge :rg_move_acquire[+78] typeset ANHP_ENABLED= :rg_move_acquire[+78] [[ == Yes ]] :rg_move_acquire[+87] clodmget -n -q policy = scsi -f value HACMPsplitmerge :rg_move_acquire[+87] typeset SCSIPR_ENABLED= :rg_move_acquire[+87] [[ == Yes ]] :rg_move_acquire[+106] (( 0 == 1 && 0 == 1 )) :rg_move_acquire[+109] (( 0 == 1 && 0 == 0 )) :rg_move_acquire[+112] (( 0 == 1 && 0 == 0 )) :rg_move_acquire[+118] clcallev rg_move epprda 1 ACQUIRE Jan 28 2023 19:42:56 EVENT START: rg_move epprda 1 ACQUIRE |2023-01-28T19:42:56|8593|EVENT START: rg_move epprda 1 ACQUIRE| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T19:42:56.879506 :clevlog[amlog_trace:320] echo '|2023-01-28T19:42:56.879506|INFO: rg_move|epprd_rg|epprda|1|ACQUIRE' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. :get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move[76] version=%I% :rg_move[86] STATUS=0 :rg_move[88] [[ ! 
-n '' ]] :rg_move[90] EMULATE=REAL :rg_move[96] set -u :rg_move[98] NODENAME=epprda :rg_move[98] export NODENAME :rg_move[99] RGID=1 :rg_move[100] (( 3 == 3 )) :rg_move[102] ACTION=ACQUIRE :rg_move[108] : serial number for this event is 8593 :rg_move[112] RG_UP_POSTEVENT_ON_NODE=epprda :rg_move[112] export RG_UP_POSTEVENT_ON_NODE :rg_move[116] clodmget -qid=1 -f group -n HACMPgroup :rg_move[116] eval RGNAME=epprd_rg :rg_move[1] RGNAME=epprd_rg :rg_move[118] UPDATESTATD=0 :rg_move[119] export UPDATESTATD :rg_move[123] RG_MOVE_EVENT=true :rg_move[123] export RG_MOVE_EVENT :rg_move[128] group_state='$RESGRP_epprd_rg_epprda' :rg_move[129] set +u :rg_move[130] eval print '$RESGRP_epprd_rg_epprda' :rg_move[1] print :rg_move[130] RG_MOVE_ONLINE='' :rg_move[130] export RG_MOVE_ONLINE :rg_move[131] set -u :rg_move[132] RG_MOVE_ONLINE=TMP_ERROR :rg_move[139] rm -f /tmp/.NFSSTOPPED :rg_move[140] rm -f /tmp/.RPCLOCKDSTOPPED :rg_move[147] set -a :rg_move[148] clsetenvgrp epprda rg_move epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 :rg_move[148] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[149] RC=0 :rg_move[150] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[1] FORCEDOWN_GROUPS='' :rg_move[2] RESOURCE_GROUPS='' :rg_move[3] HOMELESS_GROUPS='' :rg_move[4] HOMELESS_FOLLOWER_GROUPS='' :rg_move[5] ERRSTATE_GROUPS='' :rg_move[6] PRINCIPAL_ACTIONS='' :rg_move[7] ASSOCIATE_ACTIONS='' :rg_move[8] AUXILLIARY_ACTIONS='' :rg_move[8] SIBLING_GROUPS='' :rg_move[9] SIBLING_NODES_BY_GROUP='' :rg_move[10] SIBLING_ACQUIRING_GROUPS='' :rg_move[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' :rg_move[12] SIBLING_RELEASING_GROUPS='' :rg_move[13] SIBLING_RELEASING_NODES_BY_GROUP='' :rg_move[151] set +a :rg_move[155] (( 0 != 0 )) :rg_move[155] [[ -z epprd_rg ]] :rg_move[164] [[ -z TRUE ]] :rg_move[241] AM_SYNC_CALLED_BY=RG_MOVE :rg_move[241] export AM_SYNC_CALLED_BY :rg_move[242] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-01-28T19:42:57.001142 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=NONE :process_resources[1] JOB_TYPE=NONE :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 )) 
:process_resources[3342] RESOURCE_GROUPS='' :process_resources[3343] GROUPNAME='' :process_resources[3343] export GROUPNAME :process_resources[3353] IS_SERVICE_START=1 :process_resources[3354] IS_SERVICE_STOP=1 :process_resources[3360] [[ NONE == RELEASE ]] :process_resources[3360] [[ NONE == ONLINE ]] :process_resources[3729] break :process_resources[3740] : If sddsrv was turned off above, turn it back on again :process_resources[3742] [[ FALSE == TRUE ]] :process_resources[3747] exit 0 :rg_move[247] : unsetting AM_SYNC_CALLED_BY from $'callers environment as\n: we dont' require it after this point in execution. :rg_move[250] unset AM_SYNC_CALLED_BY :rg_move[253] [[ -f /tmp/.NFSSTOPPED ]] :rg_move[274] [[ -f /tmp/.RPCLOCKDSTOPPED ]] :rg_move[293] exit 0 Jan 28 2023 19:42:57 EVENT COMPLETED: rg_move epprda 1 ACQUIRE 0 |2023-01-28T19:42:57|8593|EVENT COMPLETED: rg_move epprda 1 ACQUIRE 0| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T19:42:57.129544 :clevlog[amlog_trace:320] echo '|2023-01-28T19:42:57.129544|INFO: rg_move|epprd_rg|epprda|1|ACQUIRE|0' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :rg_move_acquire[+119] exit_status=0 :rg_move_acquire[+120] : exit status of clcallev rg_move epprda 1 ACQUIRE is: 0 :rg_move_acquire[+121] exit 0 Jan 28 2023 19:42:57 EVENT COMPLETED: rg_move_acquire epprda 1 0 |2023-01-28T19:42:57|8593|EVENT COMPLETED: rg_move_acquire epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T19:42:57.240110 + echo '|2023-01-28T19:42:57.240110|INFO: rg_move_acquire|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 19:43:15 EVENT START: rg_move_complete epprda 1 |2023-01-28T19:43:15|8593|EVENT START: rg_move_complete epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T19:43:15.940316 + echo '|2023-01-28T19:43:15.940316|INFO: rg_move_complete|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. :get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move_complete[91] version=%I% :rg_move_complete[97] STATUS=0 :rg_move_complete[97] typeset -li STATUS :rg_move_complete[99] [[ -z '' ]] :rg_move_complete[101] EMULATE=REAL :rg_move_complete[104] set -u :rg_move_complete[106] (( 2 < 2 || 2 > 3 )) :rg_move_complete[112] NODENAME=epprda :rg_move_complete[112] export NODENAME :rg_move_complete[113] RGID=1 :rg_move_complete[114] (( 2 == 3 )) :rg_move_complete[118] RGDESTINATION='' :rg_move_complete[122] : serial number for this event is 8593 :rg_move_complete[126] : Interpret resource group ID into a resource group name. 
:rg_move_complete[128] clodmget -qid=1 -f group -n HACMPgroup :rg_move_complete[128] eval RGNAME=epprd_rg :rg_move_complete[1] RGNAME=epprd_rg +epprd_rg:rg_move_complete[129] GROUPNAME=epprd_rg +epprd_rg:rg_move_complete[131] UPDATESTATD=0 +epprd_rg:rg_move_complete[131] typeset -li UPDATESTATD +epprd_rg:rg_move_complete[132] NFSSTOPPED=0 +epprd_rg:rg_move_complete[132] typeset -li NFSSTOPPED +epprd_rg:rg_move_complete[133] LIMIT=60 +epprd_rg:rg_move_complete[133] WAIT=1 +epprd_rg:rg_move_complete[133] TRY=0 +epprd_rg:rg_move_complete[133] typeset -li LIMIT WAIT TRY +epprd_rg:rg_move_complete[136] : If this is a two node cluster and exported filesystems exist, then +epprd_rg:rg_move_complete[137] : when the cluster topology is stable notify rpc.statd of the changes. +epprd_rg:rg_move_complete[139] clnodename +epprd_rg:rg_move_complete[139] wc -l +epprd_rg:rg_move_complete[139] (( 2 == 2 )) +epprd_rg:rg_move_complete[141] clodmget -f group -n HACMPgroup +epprd_rg:rg_move_complete[141] RESOURCE_GROUPS=epprd_rg +epprd_rg:rg_move_complete[144] clodmget -q group='epprd_rg AND name=EXPORT_FILESYSTEM' -f value -n HACMPresource +epprd_rg:rg_move_complete[144] EXPORTLIST=$'/board_org\n/sapmnt/EPP' +epprd_rg:rg_move_complete[146] [[ -n $'/board_org\n/sapmnt/EPP' ]] +epprd_rg:rg_move_complete[146] [[ epprd_rg == epprd_rg ]] +epprd_rg:rg_move_complete[148] UPDATESTATD=1 +epprd_rg:rg_move_complete[149] [[ REAL == EMUL ]] +epprd_rg:rg_move_complete[154] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+189] TWIN_NAME=epprds :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+196] rpcinfo -p :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw status :cl_update_statd(0)[+207] : Get the current twin, if there is one :cl_update_statd(0)[+209] :cl_update_statd(0)[+209] nfso -H sm_gethost :cl_update_statd(0)[+209] 2>& 1 CURTWIN=epprds :cl_update_statd(0)[+210] RC=0 :cl_update_statd(0)[+212] [[ -z true ]] :cl_update_statd(0)[+212] [[ -z epprds ]] :cl_update_statd(0)[+225] : Get the interface to the twin node :cl_update_statd(0)[+227] :cl_update_statd(0)[+227] get_node_ip epprds :cl_update_statd(0)[+9] (( 1 != 1 )) :cl_update_statd(0)[+15] Twin_Name=epprds :cl_update_statd(0)[+16] NewTwin= :cl_update_statd(0)[+19] : Get the Interface details for every interface on the twin node :cl_update_statd(0)[+20] : Reject interfaces on nodes that are not public boot addresses :cl_update_statd(0)[+21] : because those are the only ones we have state information for :cl_update_statd(0)[+23] :cl_update_statd(0)[+23] cllsif -J ~ -Sw -i epprda :cl_update_statd(0)[+23] LC_ALL=C LOCAL_NETWORK_INFO=epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+24] cllsif -J ~ -Sw -i epprds :cl_update_statd(0)[+24] LC_ALL=C :cl_update_statd(0)[+25] read adapt type network net_type attrib node ip_addr skip interface skip netmask skip skip prefix ip_family :cl_update_statd(0)[+25] IFS=~ 
:cl_update_statd(0)[+25] [[ public != public ]] :cl_update_statd(0)[+25] [[ boot != boot ]] :cl_update_statd(0)[+33] : Find the state of this candidate :cl_update_statd(0)[+33] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+37] :cl_update_statd(0)[+37] print 61.81.244.123 :cl_update_statd(0)[+37] tr ./ xx addr=i61x81x244x123_epprds :cl_update_statd(0)[+43] eval candidate_state=${i61x81x244x123_epprds:-down} :cl_update_statd(0)[+43] candidate_state=UP :cl_update_statd(0)[+46] : If state is UP, check to see if this node can talk to it :cl_update_statd(0)[+46] [[ UP == UP ]] :cl_update_statd(0)[+50] ping -w 5 -c 1 -q 61.81.244.123 :cl_update_statd(0)[+50] 1> /dev/null :cl_update_statd(0)[+61] echo epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+61] tr \n :cl_update_statd(0)[+62] read lcl_adapt lcl_type lcl_network lcl_net_type lcl_attrib lcl_node lcl_ip_addr skip lcl_interface skip lcl_netmask skip skip lcl_prefix lcl_ip_family :cl_update_statd(0)[+62] IFS=~ :cl_update_statd(0)[+62] [[ net_ether_01 != net_ether_01 ]] :cl_update_statd(0)[+62] [[ boot != boot ]] :cl_update_statd(0)[+62] [[ public != public ]] :cl_update_statd(0)[+62] [[ AF_INET != AF_INET ]] :cl_update_statd(0)[+62] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+71] :cl_update_statd(0)[+71] print 61.81.244.134 :cl_update_statd(0)[+71] tr ./ xx addr=i61x81x244x134_epprda :cl_update_statd(0)[+77] eval lcl_candidate_state=${i61x81x244x134_epprda:-down} :cl_update_statd(0)[+77] lcl_candidate_state=UP :cl_update_statd(0)[+77] [[ UP == UP ]] :cl_update_statd(0)[+81] : epprds is on the same network as an interface that is up :cl_update_statd(0)[+82] : on the local node, and the attributes match. :cl_update_statd(0)[+84] NewTwin=epprds :cl_update_statd(0)[+85] break :cl_update_statd(0)[+85] [[ -n epprds ]] :cl_update_statd(0)[+91] break :cl_update_statd(0)[+91] [[ -z epprds ]] :cl_update_statd(0)[+100] echo epprds :cl_update_statd(0)[+101] return 0 NEWTWIN=epprds :cl_update_statd(0)[+227] [[ -z epprds ]] :cl_update_statd(0)[+227] [[ epprds != epprds ]] :cl_update_statd(0)[+259] : RC is actually 0 :cl_update_statd(0)[+266] return 0 +epprd_rg:rg_move_complete[155] (( 0 != 0 )) +epprd_rg:rg_move_complete[160] break +epprd_rg:rg_move_complete[166] : Set the RESOURCE_GROUPS environment variable with the names +epprd_rg:rg_move_complete[167] : of all resource groups participating in this event, and export +epprd_rg:rg_move_complete[168] : them to all successive scripts. 
+epprd_rg:rg_move_complete[170] set -a +epprd_rg:rg_move_complete[171] clsetenvgrp epprda rg_move_complete epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move_complete epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_complete[171] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_complete[172] RC=0 +epprd_rg:rg_move_complete[173] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_complete[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_complete[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_complete[3] HOMELESS_GROUPS='' +epprd_rg:rg_move_complete[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_complete[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_complete[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_complete[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_complete[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_complete[8] SIBLING_GROUPS='' +epprd_rg:rg_move_complete[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_complete[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_complete[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[174] set +a +epprd_rg:rg_move_complete[175] (( 0 != 0 )) +epprd_rg:rg_move_complete[182] : For each participating resource group, serially process the resources. +epprd_rg:rg_move_complete[251] (( 1 == 1 )) +epprd_rg:rg_move_complete[253] [[ REAL == EMUL ]] +epprd_rg:rg_move_complete[259] stopsrc -s rpc.lockd 0513-044 The rpc.lockd Subsystem was requested to stop. +epprd_rg:rg_move_complete[260] rcstopsrc=0 +epprd_rg:rg_move_complete[261] (( 0 != 0 )) +epprd_rg:rg_move_complete[266] (( TRY=0)) +epprd_rg:rg_move_complete[266] (( 0<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 1<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z '' ]] +epprd_rg:rg_move_complete[273] break +epprd_rg:rg_move_complete[277] [[ ! -z '' ]] +epprd_rg:rg_move_complete[300] : Sure that rpc.lockd stopped. Restart it. +epprd_rg:rg_move_complete[302] startsrc -s rpc.lockd 0513-059 The rpc.lockd Subsystem has been started. Subsystem PID is 23790010. 
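The rpc.lockd bounce above is how rg_move_complete forces NFS lock state to be rebuilt now that the exports have moved: request a stop, poll lssrc once a second for up to LIMIT=60 tries until the status column empties, then start the subsystem again. A hedged sketch of that pattern, using the variable names from the trace (ksh93 runs the last stage of a pipeline in the current shell, which is why the read into name/subsystem/pid/state is visible afterwards):

    stopsrc -s rpc.lockd
    typeset -li LIMIT=60 TRY=0
    while (( TRY < LIMIT )); do
        # The status column is 'stopping' while the stop is in flight and
        # empties once the subsystem is inoperative, exactly as traced.
        LC_ALL=C lssrc -s rpc.lockd | tail -1 | read name subsystem pid state
        [[ -z $state ]] && break
        sleep 1
        (( TRY++ ))
    done
    startsrc -s rpc.lockd   # 0513-059 ... has been started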
+epprd_rg:rg_move_complete[303] rcstartsrc=0 +epprd_rg:rg_move_complete[304] (( 0 != 0 )) +epprd_rg:rg_move_complete[365] : If the resource group in this rg_move is now homeless, +epprd_rg:rg_move_complete[366] : then we need to put it into an error state. +epprd_rg:rg_move_complete[368] active_node=0 +epprd_rg:rg_move_complete[428] : If the resource group in this rg_move is now homeless_secondary, +epprd_rg:rg_move_complete[429] : then we need to put it into an errorsecondary state. +epprd_rg:rg_move_complete[437] : Set an error state for concurrent groups that have +epprd_rg:rg_move_complete[438] : been brought offline on this node by rg_move. +epprd_rg:rg_move_complete[453] AM_SYNC_CALLED_BY=RG_MOVE_COMPLETE +epprd_rg:rg_move_complete[453] export AM_SYNC_CALLED_BY +epprd_rg:rg_move_complete[454] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-01-28T19:43:17.162477 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=REMOUNT_FILESYSTEMS ACTION=ACQUIRE FILE_SYSTEMS='"/board;/board_org"' RESOURCE_GROUPS='"epprd_rg' '"' NFS_NETWORKS='""' NFS_HOSTS='""' IP_LABELS='"epprd"' :process_resources[1] JOB_TYPE=REMOUNT_FILESYSTEMS :process_resources[1] ACTION=ACQUIRE :process_resources[1] FILE_SYSTEMS='/board;/board_org' :process_resources[1] RESOURCE_GROUPS='epprd_rg ' :process_resources[1] NFS_NETWORKS='' :process_resources[1] NFS_HOSTS='' :process_resources[1] IP_LABELS=epprd :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 )) :process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ REMOUNT_FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources[3360] [[ REMOUNT_FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources[3629] mount_nfs_filesystems REMOUNT +epprd_rg:process_resources[mount_nfs_filesystems:1437] PS4_FUNC=mount_nfs_filesystems +epprd_rg:process_resources[mount_nfs_filesystems:1437] typeset PS4_FUNC +epprd_rg:process_resources[mount_nfs_filesystems:1438] [[ high == high ]] +epprd_rg:process_resources[mount_nfs_filesystems:1438] set -x +epprd_rg:process_resources[mount_nfs_filesystems:1440] post_event_member=FALSE +epprd_rg:process_resources[mount_nfs_filesystems:1444] [[ epprda == epprda ]] +epprd_rg:process_resources[mount_nfs_filesystems:1446] post_event_member=TRUE +epprd_rg:process_resources[mount_nfs_filesystems:1447] break +epprd_rg:process_resources[mount_nfs_filesystems:1452] : This node will not be in the resource group so do not mount filesystems. 
+epprd_rg:process_resources[mount_nfs_filesystems:1454] [[ TRUE == FALSE ]] +epprd_rg:process_resources[mount_nfs_filesystems:1459] STAT=0 +epprd_rg:process_resources[mount_nfs_filesystems:1463] export GROUPNAME +epprd_rg:process_resources[mount_nfs_filesystems:1465] get_list_head '/board;/board_org' +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo '/board;/board_org' +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo '/board;/board_org' +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[mount_nfs_filesystems:1465] read LIST_OF_FILE_SYSTEMS_FOR_RG +epprd_rg:process_resources[mount_nfs_filesystems:1466] get_list_tail '/board;/board_org' +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo '/board;/board_org' +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[mount_nfs_filesystems:1466] read FILE_SYSTEMS +epprd_rg:process_resources[mount_nfs_filesystems:1468] get_list_head +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[mount_nfs_filesystems:1468] read NFS_HOST +epprd_rg:process_resources[mount_nfs_filesystems:1469] get_list_tail +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[mount_nfs_filesystems:1469] read NFS_HOSTS +epprd_rg:process_resources[mount_nfs_filesystems:1471] get_list_head +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[mount_nfs_filesystems:1471] 
read NFS_NETWORK +epprd_rg:process_resources[mount_nfs_filesystems:1472] get_list_tail +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[mount_nfs_filesystems:1472] read NFS_NETWORKS +epprd_rg:process_resources[mount_nfs_filesystems:1474] get_list_head epprd +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo epprd +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo epprd +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[mount_nfs_filesystems:1474] read LIST_OF_IP_LABELS_FOR_RG +epprd_rg:process_resources[mount_nfs_filesystems:1475] get_list_tail epprd +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo epprd +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[mount_nfs_filesystems:1475] read IP_LABELS +epprd_rg:process_resources[mount_nfs_filesystems:1477] MOUNT_FILESYSTEM='/board;/board_org' +epprd_rg:process_resources[mount_nfs_filesystems:1478] NFSMOUNT_LABEL=epprd +epprd_rg:process_resources[mount_nfs_filesystems:1481] : Do the required NFS_mounts. +epprd_rg:process_resources[mount_nfs_filesystems:1484] NW_NFSMOUNT_LABEL='' +epprd_rg:process_resources[mount_nfs_filesystems:1486] [[ -z '' ]] +epprd_rg:process_resources[mount_nfs_filesystems:1488] NFS_HOST=epprda +epprd_rg:process_resources[mount_nfs_filesystems:1491] NFSHOST='' +epprd_rg:process_resources[mount_nfs_filesystems:1492] [[ -n epprda ]] +epprd_rg:process_resources[mount_nfs_filesystems:1494] [[ -n '' ]] +epprd_rg:process_resources[mount_nfs_filesystems:1516] [[ REMOUNT == REMOUNT ]] +epprd_rg:process_resources[mount_nfs_filesystems:1520] arp -d epprd epprd (61.81.244.156) deleted +epprd_rg:process_resources[mount_nfs_filesystems:1526] ping epprd 1024 1 +epprd_rg:process_resources[mount_nfs_filesystems:1526] 1> /dev/null +epprd_rg:process_resources[mount_nfs_filesystems:1528] NFSHOST=epprd +epprd_rg:process_resources[mount_nfs_filesystems:1529] break +epprd_rg:process_resources[mount_nfs_filesystems:1533] [[ -n epprd ]] +epprd_rg:process_resources[mount_nfs_filesystems:1536] : activate_nfs will not wait for the mounts to complete +epprd_rg:process_resources[mount_nfs_filesystems:1538] cl_activate_nfs 1 epprd '/board;/board_org' +epprd_rg:cl_activate_nfs[68] [[ high == high ]] +epprd_rg:cl_activate_nfs[68] version='1.19.4.2 $Source$' +epprd_rg:cl_activate_nfs[70] . 
/usr/es/sbin/cluster/events/utils/cl_nfs_utils +epprd_rg:cl_activate_nfs[98] PROGNAME=cl_activate_nfs +epprd_rg:cl_activate_nfs[99] [[ high == high ]] +epprd_rg:cl_activate_nfs[101] set -x +epprd_rg:cl_activate_nfs[102] version=%I +epprd_rg:cl_activate_nfs[105] cl_exports_data='' +epprd_rg:cl_activate_nfs[105] typeset cl_exports_data +epprd_rg:cl_activate_nfs[106] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[72] set -u +epprd_rg:cl_activate_nfs[242] grep -w ^MOUNT_WLMCNTRL_SELFMANAGE /etc/environment +epprd_rg:cl_activate_nfs[242] export eval +epprd_rg:cl_activate_nfs[244] (( 3 < 3 )) +epprd_rg:cl_activate_nfs[253] ATTEMPTS=1 +epprd_rg:cl_activate_nfs[253] typeset -li ATTEMPTS +epprd_rg:cl_activate_nfs[254] HOST=epprd +epprd_rg:cl_activate_nfs[256] shift 2 +epprd_rg:cl_activate_nfs[261] FILELIST='/board;/board_org' +epprd_rg:cl_activate_nfs[266] print '/board;/board_org' +epprd_rg:cl_activate_nfs[266] grep -q '\;/' +epprd_rg:cl_activate_nfs[271] CROSSMOUNTS=TRUE +epprd_rg:cl_activate_nfs[272] print '/board;/board_org' +epprd_rg:cl_activate_nfs[272] tr ' ' '\n' +epprd_rg:cl_activate_nfs[272] /bin/sort -k 1,1 '-t;' +epprd_rg:cl_activate_nfs[272] MOUNTLIST='/board;/board_org' +epprd_rg:cl_activate_nfs[281] ALLNFS=All_nfs_mounts +epprd_rg:cl_activate_nfs[282] cl_RMupdate resource_acquiring All_nfs_mounts cl_activate_nfs 2023-01-28T19:43:17.285215 2023-01-28T19:43:17.289506 +epprd_rg:cl_activate_nfs[288] odmget -q name='RECOVERY_METHOD AND group=epprd_rg' HACMPresource +epprd_rg:cl_activate_nfs[289] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_activate_nfs[288] METHOD=sequential +epprd_rg:cl_activate_nfs[291] odmget -q name='EXPORT_FILESYSTEM AND group=epprd_rg' HACMPresource +epprd_rg:cl_activate_nfs[291] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_activate_nfs[291] EXPORT_FILESYSTEM=$'/board_org\n/sapmnt/EPP' +epprd_rg:cl_activate_nfs[293] odmget -q name='EXPORT_FILESYSTEM_V4 AND group=epprd_rg' HACMPresource +epprd_rg:cl_activate_nfs[293] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_activate_nfs[293] EXPORT_FILESYSTEM_V4='' +epprd_rg:cl_activate_nfs[302] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[304] [[ -z '' ]] +epprd_rg:cl_activate_nfs[305] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_activate_nfs[308] VERSION_SOURCE=FILES +epprd_rg:cl_activate_nfs[320] [[ FILES == FILES ]] +epprd_rg:cl_activate_nfs[322] export_v3='' +epprd_rg:cl_activate_nfs[323] export_v4='' +epprd_rg:cl_activate_nfs[330] getline_exports /board_org +epprd_rg:cl_activate_nfs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_activate_nfs[getline_exports:45] line='' +epprd_rg:cl_activate_nfs[getline_exports:45] typeset line +epprd_rg:cl_activate_nfs[getline_exports:46] flag=0 +epprd_rg:cl_activate_nfs[getline_exports:46] typeset -i flag +epprd_rg:cl_activate_nfs[getline_exports:47] fs=/board_org +epprd_rg:cl_activate_nfs[getline_exports:49] [[ -z /board_org ]] +epprd_rg:cl_activate_nfs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_activate_nfs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[getline_exports:56] read -r line +epprd_rg:cl_activate_nfs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:63] [[ '/sapmnt/EPP 
-sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_activate_nfs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_activate_nfs[getline_exports:69] (( 1 == 0 )) +epprd_rg:cl_activate_nfs[getline_exports:74] [[ 0 == 1 ]] +epprd_rg:cl_activate_nfs[getline_exports:56] read -r line +epprd_rg:cl_activate_nfs[getline_exports:59] line='/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_activate_nfs[getline_exports:60] line='/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_activate_nfs[getline_exports:63] [[ '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' == #* ]] +epprd_rg:cl_activate_nfs[getline_exports:68] echo '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_activate_nfs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_activate_nfs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_activate_nfs[getline_exports:71] flag=1 +epprd_rg:cl_activate_nfs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_activate_nfs[getline_exports:76] echo '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_activate_nfs[getline_exports:76] grep -w '\\' +epprd_rg:cl_activate_nfs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_activate_nfs[getline_exports:81] cl_exports_data=' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_activate_nfs[getline_exports:82] break +epprd_rg:cl_activate_nfs[getline_exports:89] return 0 +epprd_rg:cl_activate_nfs[331] export_line=' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_activate_nfs[336] echo /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds +epprd_rg:cl_activate_nfs[336] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_activate_nfs[337] cut -d- -f2- +epprd_rg:cl_activate_nfs[337] tr , ' ' +epprd_rg:cl_activate_nfs[336] options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap:epprd:epprda:epprds root=epprdap:epprd:epprda:epprds ' +epprd_rg:cl_activate_nfs[343] vers_missing=1 +epprd_rg:cl_activate_nfs[366] (( vers_missing )) +epprd_rg:cl_activate_nfs[366] export_v3=' /board_org' +epprd_rg:cl_activate_nfs[330] getline_exports /sapmnt/EPP +epprd_rg:cl_activate_nfs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_activate_nfs[getline_exports:45] line='' +epprd_rg:cl_activate_nfs[getline_exports:45] typeset line +epprd_rg:cl_activate_nfs[getline_exports:46] flag=0 +epprd_rg:cl_activate_nfs[getline_exports:46] typeset -i flag +epprd_rg:cl_activate_nfs[getline_exports:47] fs=/sapmnt/EPP +epprd_rg:cl_activate_nfs[getline_exports:49] [[ -z /sapmnt/EPP ]] +epprd_rg:cl_activate_nfs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_activate_nfs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[getline_exports:56] read -r line +epprd_rg:cl_activate_nfs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' 
+epprd_rg:cl_activate_nfs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_activate_nfs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:68] grep -q '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_activate_nfs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_activate_nfs[getline_exports:71] flag=1 +epprd_rg:cl_activate_nfs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_activate_nfs[getline_exports:76] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:76] grep -w '\\' +epprd_rg:cl_activate_nfs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_activate_nfs[getline_exports:81] cl_exports_data=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:82] break +epprd_rg:cl_activate_nfs[getline_exports:89] return 0 +epprd_rg:cl_activate_nfs[331] export_line=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[336] echo /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_activate_nfs[336] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_activate_nfs[337] cut -d- -f2- +epprd_rg:cl_activate_nfs[337] tr , ' ' +epprd_rg:cl_activate_nfs[336] options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap root=epprdap ' +epprd_rg:cl_activate_nfs[343] vers_missing=1 +epprd_rg:cl_activate_nfs[366] (( vers_missing )) +epprd_rg:cl_activate_nfs[366] export_v3=' /board_org /sapmnt/EPP' +epprd_rg:cl_activate_nfs[369] EXPORT_FILESYSTEM=' /board_org /sapmnt/EPP' +epprd_rg:cl_activate_nfs[370] EXPORT_FILESYSTEM_V4='' +epprd_rg:cl_activate_nfs[377] [[ -x /usr/sbin/nfsrgyd ]] +epprd_rg:cl_activate_nfs[378] [[ -n '' ]] +epprd_rg:cl_activate_nfs[379] grep -q vers=4 /etc/filesystems +epprd_rg:cl_activate_nfs[394] [[ TRUE == TRUE ]] +epprd_rg:cl_activate_nfs[411] filesystem=/board_org +epprd_rg:cl_activate_nfs[412] mountpoint=/board +epprd_rg:cl_activate_nfs:/board;/board_org[429] PS4_LOOP='/board;/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[430] [[ sequential == sequential ]] +epprd_rg:cl_activate_nfs:/board;/board_org[432] nfs_mount 1 epprd /board_org /board +epprd_rg:cl_activate_nfs(0.124):/board;/board_org[nfs_mount:99] (( 4 != 4 )) +epprd_rg:cl_activate_nfs(0.124):/board;/board_org[nfs_mount:108] LIMIT=1 +epprd_rg:cl_activate_nfs(0.124):/board;/board_org[nfs_mount:108] typeset -li LIMIT +epprd_rg:cl_activate_nfs(0.124):/board;/board_org[nfs_mount:109] HOST=epprd +epprd_rg:cl_activate_nfs(0.124):/board;/board_org[nfs_mount:110] FileSystem=/board_org +epprd_rg:cl_activate_nfs(0.124):/board;/board_org[nfs_mount:111] MountPoint=/board +epprd_rg:cl_activate_nfs(0.125):/board;/board_org[nfs_mount:116] mount +epprd_rg:cl_activate_nfs(0.127):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.127):/board;/board_org[nfs_mount:119] [[ mounted == /board ]] +epprd_rg:cl_activate_nfs(0.127):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.127):/board;/board_org[nfs_mount:119] [[ --------------- == /board ]] +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest 
+epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:119] [[ procfs == /board ]] +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:119] [[ ahafs == /board ]] +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:119] [[ /sapcd == /board ]] +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:119] [[ /board == /board ]] +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:121] cl_echo 11 'cl_activate_nfs: Filesystem /board already mounted.\n' cl_activate_nfs /board Jan 28 2023 19:43:17 cl_activate_nfs: Filesystem /board already mounted.
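The scan just traced is nfs_mount's double-mount guard: it walks the live mount table line by line and returns success as soon as the target mount point shows up, which is why the /board cross-mount is a no-op on this pass. A stripped-down sketch of the guard under the same field layout (already_mounted is a hypothetical helper name, not the shipped function):

    # Return 0 if the mount point already appears in the mount table. ksh93
    # runs the trailing while of a pipeline in the current shell, so the
    # return inside the loop really returns from the function.
    already_mounted() {
        typeset MountPoint=$1
        mount | while read node node_fs lcl_mount rest; do
            [[ $lcl_mount == "$MountPoint" ]] && return 0
        done
        return 1
    }
    already_mounted /board && return 0   # matches the trace: /board is mounted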
+epprd_rg:cl_activate_nfs(0.146):/board;/board_org[nfs_mount:122] return 0 +epprd_rg:process_resources[mount_nfs_filesystems:1540] RC=0 +epprd_rg:process_resources[mount_nfs_filesystems:1541] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[mount_nfs_filesystems:1549] (( 0 != 0 )) +epprd_rg:process_resources[mount_nfs_filesystems:1565] return 0 +epprd_rg:process_resources[3630] : failure of remount will not cause failure of the event +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:43:17.396949 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=NONE +epprd_rg:process_resources[1] JOB_TYPE=NONE +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ NONE == RELEASE ]] +epprd_rg:process_resources[3360] [[ NONE == ONLINE ]] +epprd_rg:process_resources[3729] break +epprd_rg:process_resources[3740] : If sddsrv was turned off above, turn it back on again +epprd_rg:process_resources[3742] [[ FALSE == TRUE ]] +epprd_rg:process_resources[3747] exit 0 +epprd_rg:rg_move_complete[455] STATUS=0 +epprd_rg:rg_move_complete[456] : The exit status of process_resources is: 0 +epprd_rg:rg_move_complete[461] unset AM_SYNC_CALLED_BY +epprd_rg:rg_move_complete[462] [[ TRUE == TRUE ]] +epprd_rg:rg_move_complete[491] [[ -z '' ]] +epprd_rg:rg_move_complete[493] RESOURCE_GROUPS=epprd_rg +epprd_rg:rg_move_complete[499] GROUPNAME=epprd_rg +epprd_rg:rg_move_complete[499] export GROUPNAME +epprd_rg:rg_move_complete[501] cl_rrmethods2call postrg_move +epprd_rg:cl_rrmethods2call[56] version=%I% +epprd_rg:cl_rrmethods2call[84] RRMETHODS='' +epprd_rg:cl_rrmethods2call[85] NEED_RR_ENV_VARS=no +epprd_rg:cl_rrmethods2call[124] NEED_RR_ENV_VARS=yes +epprd_rg:cl_rrmethods2call[129] : Set the '*_REP_RESOURCE' variables if needed. 
+epprd_rg:cl_rrmethods2call[131] [[ yes == yes ]] +epprd_rg:cl_rrmethods2call[133] cllsres +epprd_rg:cl_rrmethods2call[133] 2> /dev/null +epprd_rg:cl_rrmethods2call[133] eval APPLICATIONS='"epprd_app"' EXPORT_FILESYSTEM='"/board_org' '/sapmnt/EPP"' FILESYSTEM='""' FORCED_VARYON='"false"' FSCHECK_TOOL='"fsck"' FS_BEFORE_IPADDR='"false"' MOUNT_FILESYSTEM='"/board;/board_org"' RECOVERY_METHOD='"sequential"' SERVICE_LABEL='"epprd"' SSA_DISK_FENCING='"false"' VG_AUTO_IMPORT='"false"' VOLUME_GROUP='"datavg"' USERDEFINED_RESOURCES='""' +epprd_rg:cl_rrmethods2call[1] APPLICATIONS=epprd_app +epprd_rg:cl_rrmethods2call[1] EXPORT_FILESYSTEM='/board_org /sapmnt/EPP' +epprd_rg:cl_rrmethods2call[1] FILESYSTEM='' +epprd_rg:cl_rrmethods2call[1] FORCED_VARYON=false +epprd_rg:cl_rrmethods2call[1] FSCHECK_TOOL=fsck +epprd_rg:cl_rrmethods2call[1] FS_BEFORE_IPADDR=false +epprd_rg:cl_rrmethods2call[1] MOUNT_FILESYSTEM='/board;/board_org' +epprd_rg:cl_rrmethods2call[1] RECOVERY_METHOD=sequential +epprd_rg:cl_rrmethods2call[1] SERVICE_LABEL=epprd +epprd_rg:cl_rrmethods2call[1] SSA_DISK_FENCING=false +epprd_rg:cl_rrmethods2call[1] VG_AUTO_IMPORT=false +epprd_rg:cl_rrmethods2call[1] VOLUME_GROUP=datavg +epprd_rg:cl_rrmethods2call[1] USERDEFINED_RESOURCES='' +epprd_rg:cl_rrmethods2call[137] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[142] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[147] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[152] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[157] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[162] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[167] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[172] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[182] [[ -z '' ]] +epprd_rg:cl_rrmethods2call[184] typeset sysmgdata +epprd_rg:cl_rrmethods2call[185] typeset reposmgdata +epprd_rg:cl_rrmethods2call[186] [[ -x /usr/es/sbin/cluster/xd_generic/xd_cli/clxd_list_mg_smit ]] +epprd_rg:cl_rrmethods2call[191] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[191] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[197] echo '' +epprd_rg:cl_rrmethods2call[199] return 0 +epprd_rg:rg_move_complete[501] METHODS='' +epprd_rg:rg_move_complete[516] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. 
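cl_rrmethods2call, traced above, decides whether any replicated-resource recovery methods must run after the move. It does so by eval'ing the output of cllsres into shell variables and testing each replicated-resource variable in turn; every test here comes back empty, so no methods are returned and rg_move_complete just refreshes clcomd (above) before exiting (below). The variable-loading idiom, reduced to its core:

    # cllsres prints VAR="value" assignments for the group's resources;
    # eval'ing them puts APPLICATIONS, VOLUME_GROUP, SERVICE_LABEL, etc.
    # into the environment (same pattern as the trace).
    eval $(cllsres 2>/dev/null)
    print -- "$VOLUME_GROUP"    # datavg
    print -- "$SERVICE_LABEL"   # epprd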
+epprd_rg:rg_move_complete[518] exit 0
Jan 28 2023 19:43:17 EVENT COMPLETED: rg_move_complete epprda 1 0
|2023-01-28T19:43:17|8593|EVENT COMPLETED: rg_move_complete epprda 1 0|
+ clcycle clavailability.log
+ 1> /dev/null 2>& 1
+ cltime
+ DATE=2023-01-28T19:43:17.518402
+ echo '|2023-01-28T19:43:17.518402|INFO: rg_move_complete|epprd_rg|epprda|1|0'
+ 1>> /var/hacmp/availability/clavailability.log

PowerHA SystemMirror Event Summary
----------------------------------------------------------------------------
Serial number for this event: 8593
Event: TE_RG_MOVE_ACQUIRE
Start time: Sat Jan 28 19:42:56 2023
End time: Sat Jan 28 19:43:37 2023
Action:          Resource:          Script Name:
----------------------------------------------------------------------------
Acquiring resource: All_nfs_mounts   cl_activate_nfs
Search on: Sat.Jan.28.19:43:17.KORST.2023.cl_activate_nfs.All_nfs_mounts.epprd_rg.ref
----------------------------------------------------------------------------
|EVENT_SUMMARY_START|TE_RG_MOVE_ACQUIRE|2023-01-28T19:42:56|2023-01-28T19:43:37|8593|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:43:17.KORST.2023.cl_activate_nfs.All_nfs_mounts.epprd_rg.ref.ref|
|EVENT_SUMMARY_END|

PowerHA SystemMirror Event Preamble
----------------------------------------------------------------------------
Serial number for this event: 8594
No resource state change initiated by the cluster manager as a result of this event
----------------------------------------------------------------------------
|EVENT_PREAMBLE_START|TE_EXTERNAL_RESOURCE_STATE_CHANGE_COMPLETE|2023-01-28T19:43:39|8594|
|EVENT_NO_ACTIONS_QUEUED|
|EVENT_PREAMBLE_END|

Jan 28 2023 19:43:39 EVENT START: external_resource_state_change_complete epprds
|2023-01-28T19:43:39|8594|EVENT START: external_resource_state_change_complete epprds|
:external_resource_state_change_complete[61] version=%I%
:external_resource_state_change_complete[64] set -u
:external_resource_state_change_complete[66] (( 1 != 1 ))
:external_resource_state_change_complete[73] : serial number for this event is 8594
:external_resource_state_change_complete[76] : This is the final info of all RGs:
:external_resource_state_change_complete[78] clRGinfo -p -t
:external_resource_state_change_complete[78] 2> /dev/null

Cluster Name: epprda_cluster

Resource Group Name: epprd_rg
Node                                                             Group State     Delayed Timers
---------------------------------------------------------------- --------------- -------------------
epprda                                                           OFFLINE
epprds                                                           ONLINE

:external_resource_state_change_complete[80] exit 0
Jan 28 2023 19:43:39 EVENT COMPLETED: external_resource_state_change_complete epprds 0
|2023-01-28T19:43:40|8594|EVENT COMPLETED: external_resource_state_change_complete epprds 0|

Jan 28 2023 19:49:03 EVENT START: admin_op user_rg_move 8601 0
|2023-01-28T19:49:03|8601|EVENT START: admin_op user_rg_move 8601 0|
:admin_op[110] trap sigint_handler INT
:admin_op[116] OP_TYPE=user_rg_move
:admin_op[116] typeset OP_TYPE
:admin_op[117] SERIAL=8601
:admin_op[117] typeset -li SERIAL
:admin_op[118] INVALID=0
:admin_op[118] typeset -li INVALID

The administrator initiated the following action at Sat Jan 28 19:49:03 KORST 2023
Check smit.log and clutils.log for additional details.
Move a Resource Group to Another Node / Site
Attempting to move resource group epprd_rg to node epprda.
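The "Search on:" token in the event summary above is written precisely so the matching event detail can be located in the full log later. A typical lookup, assuming the default hacmp.out location:

    grep -n 'cl_activate_nfs.All_nfs_mounts.epprd_rg.ref' /var/hacmp/log/hacmp.out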
Jan 28 2023 19:49:03 EVENT COMPLETED: admin_op user_rg_move 8601 0 0 |2023-01-28T19:49:03|8601|EVENT COMPLETED: admin_op user_rg_move 8601 0 0| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 8601 Enqueued rg_move acquire event for resource group epprd_rg. Enqueued rg_move release event for resource group epprd_rg. Cluster External Resource State Change Complete Event has been enqueued. ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_EXTERNAL_RESOURCE_STATE_CHANGE|2023-01-28T19:49:03|8601| |CLUSTER_RG_MOVE_ACQUIRE|epprd_rg| |CLUSTER_RG_MOVE_RELEASE|epprd_rg| |EXTERNAL_RESOURCE_STATE_CHANGE_COMPLETE| |EVENT_PREAMBLE_END| Jan 28 2023 19:49:03 EVENT START: external_resource_state_change epprda |2023-01-28T19:49:03|8601|EVENT START: external_resource_state_change epprda| :external_resource_state_change[62] version=%I% :external_resource_state_change[65] set -u :external_resource_state_change[67] (( 1 != 1 )) :external_resource_state_change[74] : serial number for this event is 8601 :external_resource_state_change[78] exit 0 Jan 28 2023 19:49:04 EVENT COMPLETED: external_resource_state_change epprda 0 |2023-01-28T19:49:04|8601|EVENT COMPLETED: external_resource_state_change epprda 0| Jan 28 2023 19:49:04 EVENT START: rg_move_release epprda 1 |2023-01-28T19:49:04|8603|EVENT START: rg_move_release epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T19:49:04.299712 + echo '|2023-01-28T19:49:04.299712|INFO: rg_move_release|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_release[+54] [[ high = high ]] :rg_move_release[+54] version=1.6 :rg_move_release[+56] set -u :rg_move_release[+58] [ 2 != 2 ] :rg_move_release[+64] set +u :rg_move_release[+66] clcallev rg_move epprda 1 RELEASE Jan 28 2023 19:49:04 EVENT START: rg_move epprda 1 RELEASE |2023-01-28T19:49:04|8603|EVENT START: rg_move epprda 1 RELEASE| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T19:49:04.425714 :clevlog[amlog_trace:320] echo '|2023-01-28T19:49:04.425714|INFO: rg_move|epprd_rg|epprda|1|RELEASE' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. :get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move[76] version=%I% :rg_move[86] STATUS=0 :rg_move[88] [[ ! 
-n '' ]] :rg_move[90] EMULATE=REAL :rg_move[96] set -u :rg_move[98] NODENAME=epprda :rg_move[98] export NODENAME :rg_move[99] RGID=1 :rg_move[100] (( 3 == 3 )) :rg_move[102] ACTION=RELEASE :rg_move[108] : serial number for this event is 8603 :rg_move[112] RG_UP_POSTEVENT_ON_NODE=epprda :rg_move[112] export RG_UP_POSTEVENT_ON_NODE :rg_move[116] clodmget -qid=1 -f group -n HACMPgroup :rg_move[116] eval RGNAME=epprd_rg :rg_move[1] RGNAME=epprd_rg :rg_move[118] UPDATESTATD=0 :rg_move[119] export UPDATESTATD :rg_move[123] RG_MOVE_EVENT=true :rg_move[123] export RG_MOVE_EVENT :rg_move[128] group_state='$RESGRP_epprd_rg_epprda' :rg_move[129] set +u :rg_move[130] eval print '$RESGRP_epprd_rg_epprda' :rg_move[1] print :rg_move[130] RG_MOVE_ONLINE='' :rg_move[130] export RG_MOVE_ONLINE :rg_move[131] set -u :rg_move[132] RG_MOVE_ONLINE=TMP_ERROR :rg_move[139] rm -f /tmp/.NFSSTOPPED :rg_move[140] rm -f /tmp/.RPCLOCKDSTOPPED :rg_move[147] set -a :rg_move[148] clsetenvgrp epprda rg_move epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 :rg_move[148] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[149] RC=0 :rg_move[150] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[1] FORCEDOWN_GROUPS='' :rg_move[2] RESOURCE_GROUPS='' :rg_move[3] HOMELESS_GROUPS='' :rg_move[4] HOMELESS_FOLLOWER_GROUPS='' :rg_move[5] ERRSTATE_GROUPS='' :rg_move[6] PRINCIPAL_ACTIONS='' :rg_move[7] ASSOCIATE_ACTIONS='' :rg_move[8] AUXILLIARY_ACTIONS='' :rg_move[8] SIBLING_GROUPS='' :rg_move[9] SIBLING_NODES_BY_GROUP='' :rg_move[10] SIBLING_ACQUIRING_GROUPS='' :rg_move[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' :rg_move[12] SIBLING_RELEASING_GROUPS='' :rg_move[13] SIBLING_RELEASING_NODES_BY_GROUP='' :rg_move[151] set +a :rg_move[155] (( 0 != 0 )) :rg_move[155] [[ -z epprd_rg ]] :rg_move[164] [[ -z TRUE ]] :rg_move[241] AM_SYNC_CALLED_BY=RG_MOVE :rg_move[241] export AM_SYNC_CALLED_BY :rg_move[242] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-01-28T19:49:04.548091 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=NONE :process_resources[1] JOB_TYPE=NONE :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 )) 
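rg_move's handshake with clsetenvgrp, traced above, follows the same capture-and-eval pattern: the helper prints a newline-separated block of NAME="" assignments describing group dispositions, and rg_move evals it inside set -a so every name lands in the environment. A sketch of that exchange (node and group names taken from the trace):

    # Sketch of the clsetenvgrp handshake; in this run all lists came back empty.
    set -a
    clsetenvgrp_output=$(clsetenvgrp epprda rg_move epprd_rg)
    RC=$?
    eval "$clsetenvgrp_output"   # defines FORCEDOWN_GROUPS, SIBLING_GROUPS, ...
    set +a
    (( RC != 0 )) && exit 1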
:process_resources[3342] RESOURCE_GROUPS='' :process_resources[3343] GROUPNAME='' :process_resources[3343] export GROUPNAME :process_resources[3353] IS_SERVICE_START=1 :process_resources[3354] IS_SERVICE_STOP=1 :process_resources[3360] [[ NONE == RELEASE ]] :process_resources[3360] [[ NONE == ONLINE ]] :process_resources[3729] break :process_resources[3740] : If sddsrv was turned off above, turn it back on again :process_resources[3742] [[ FALSE == TRUE ]] :process_resources[3747] exit 0 :rg_move[247] : unsetting AM_SYNC_CALLED_BY from $'callers environment as\n: we dont' require it after this point in execution. :rg_move[250] unset AM_SYNC_CALLED_BY :rg_move[253] [[ -f /tmp/.NFSSTOPPED ]] :rg_move[274] [[ -f /tmp/.RPCLOCKDSTOPPED ]] :rg_move[293] exit 0 Jan 28 2023 19:49:04 EVENT COMPLETED: rg_move epprda 1 RELEASE 0 |2023-01-28T19:49:04|8603|EVENT COMPLETED: rg_move epprda 1 RELEASE 0| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T19:49:04.655710 :clevlog[amlog_trace:320] echo '|2023-01-28T19:49:04.655710|INFO: rg_move|epprd_rg|epprda|1|RELEASE|0' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :rg_move_release[+68] exit 0 Jan 28 2023 19:49:04 EVENT COMPLETED: rg_move_release epprda 1 0 |2023-01-28T19:49:04|8603|EVENT COMPLETED: rg_move_release epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T19:49:04.781482 + echo '|2023-01-28T19:49:04.781482|INFO: rg_move_release|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 19:50:32 EVENT START: rg_move_fence epprda 1 |2023-01-28T19:50:32|8603|EVENT START: rg_move_fence epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T19:50:32.813660 + echo '|2023-01-28T19:50:32.813660|INFO: rg_move_fence|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_fence[62] [[ high == high ]] :rg_move_fence[62] version=1.11 :rg_move_fence[63] NODENAME=epprda :rg_move_fence[63] export NODENAME :rg_move_fence[65] set -u :rg_move_fence[67] [ 2 != 2 ] :rg_move_fence[73] set +u :rg_move_fence[75] [[ -z TRUE ]] :rg_move_fence[80] [[ TRUE == TRUE ]] :rg_move_fence[82] LOCAL_NODENAME=epprda :rg_move_fence[83] odmget -qid=1 HACMPgroup :rg_move_fence[83] egrep 'group =' :rg_move_fence[83] awk '{print $3}' :rg_move_fence[83] eval RGNAME='"epprd_rg"' :rg_move_fence[1] RGNAME=epprd_rg +epprd_rg:rg_move_fence[84] GROUPNAME=epprd_rg +epprd_rg:rg_move_fence[85] group_state='$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[86] set +u +epprd_rg:rg_move_fence[87] eval print '$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[1] print +epprd_rg:rg_move_fence[87] RG_MOVE_ONLINE='' +epprd_rg:rg_move_fence[87] export RG_MOVE_ONLINE +epprd_rg:rg_move_fence[88] set -u +epprd_rg:rg_move_fence[89] RG_MOVE_ONLINE=TMP_ERROR +epprd_rg:rg_move_fence[91] set -a +epprd_rg:rg_move_fence[92] clsetenvgrp epprda rg_move epprd_rg '' :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_fence[92] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" 
\nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[93] RC=0 +epprd_rg:rg_move_fence[94] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_fence[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_fence[3] HOMELESS_GROUPS='' +epprd_rg:rg_move_fence[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_fence[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_fence[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_fence[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_fence[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_fence[8] SIBLING_GROUPS='' +epprd_rg:rg_move_fence[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_fence[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_fence[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[95] set +a +epprd_rg:rg_move_fence[96] [ 0 -ne 0 ] +epprd_rg:rg_move_fence[103] process_resources FENCE :rg_move_fence[3318] version=1.169 :rg_move_fence[3321] STATUS=0 :rg_move_fence[3322] sddsrv_off=FALSE :rg_move_fence[3324] true :rg_move_fence[3326] : call rgpa, and it will tell us what to do next :rg_move_fence[3328] set -a :rg_move_fence[3329] clRGPA FENCE :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa FENCE 2023-01-28T19:50:32.918358 clrgpa :clRGPA[+55] exit 0 :rg_move_fence[3329] eval JOB_TYPE=NONE :rg_move_fence[1] JOB_TYPE=NONE :rg_move_fence[3330] RC=0 :rg_move_fence[3331] set +a :rg_move_fence[3333] (( 0 != 0 )) :rg_move_fence[3342] RESOURCE_GROUPS='' :rg_move_fence[3343] GROUPNAME='' :rg_move_fence[3343] export GROUPNAME :rg_move_fence[3353] IS_SERVICE_START=1 :rg_move_fence[3354] IS_SERVICE_STOP=1 :rg_move_fence[3360] [[ NONE == RELEASE ]] :rg_move_fence[3360] [[ NONE == ONLINE ]] :rg_move_fence[3729] break :rg_move_fence[3740] : If sddsrv was turned off above, turn it back on again :rg_move_fence[3742] [[ FALSE == TRUE ]] :rg_move_fence[3747] exit 0 +epprd_rg:rg_move_fence[104] : exit status of process_resources FENCE is: 0 +epprd_rg:rg_move_fence[107] [[ TRUE == TRUE ]] +epprd_rg:rg_move_fence[109] export EVENT_TYPE +epprd_rg:rg_move_fence[110] echo RELEASE_PRIMARY RELEASE_PRIMARY +epprd_rg:rg_move_fence[111] [[ -n '' ]] +epprd_rg:rg_move_fence[141] exit 0 Jan 28 2023 19:50:32 EVENT COMPLETED: rg_move_fence epprda 1 0 |2023-01-28T19:50:32|8603|EVENT COMPLETED: rg_move_fence epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T19:50:33.011768 + echo '|2023-01-28T19:50:33.011768|INFO: rg_move_fence|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log PowerHA SystemMirror Event Summary ---------------------------------------------------------------------------- Serial number for this event: 8603 Event: TE_RG_MOVE_RELEASE Start time: Sat Jan 28 19:49:04 2023 End time: Sat Jan 28 19:50:33 2023 Action: Resource: Script 
Name: ---------------------------------------------------------------------------- No resources changed as a result of this event ---------------------------------------------------------------------------- |EVENT_SUMMARY_START|TE_RG_MOVE_RELEASE|2023-01-28T19:49:04|2023-01-28T19:50:33|8603| |EVENT_NO_ACTION| |EVENT_SUMMARY_END| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 8606 No resource state change initiated by the cluster manager as a result of this event ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_JOIN_NETWORK|2023-01-28T19:50:34|8606| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Jan 28 2023 19:50:34 EVENT START: network_up epprds net_ether_01 |2023-01-28T19:50:34|8606|EVENT START: network_up epprds net_ether_01| :network_up[+66] version=%I% :network_up[+69] set -a :network_up[+70] cllsparam -n epprda :network_up[+70] eval NODE_NAME=epprda VERBOSE_LOGGING=high PS4='${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] ' DEBUG_LEVEL=Standard LC_ALL='C' :network_up[+70] NODE_NAME=epprda VERBOSE_LOGGING=high PS4=${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] DEBUG_LEVEL=Standard LC_ALL=C :network_up[+71] set +a :network_up[+73] STATUS=0 :network_up[+75] [ 2 -ne 2 ] :network_up[+81] [[ epprds == epprda ]] :network_up[+84] export NETWORKNAME=net_ether_01 :network_up[+89] [[ epprds == epprda ]] :network_up[+92] exit 0 Jan 28 2023 19:50:34 EVENT COMPLETED: network_up epprds net_ether_01 0 |2023-01-28T19:50:34|8606|EVENT COMPLETED: network_up epprds net_ether_01 0| Jan 28 2023 19:50:34 EVENT START: network_up_complete epprds net_ether_01 |2023-01-28T19:50:34|8606|EVENT START: network_up_complete epprds net_ether_01| :network_up_complete[+68] version=%I% :network_up_complete[+72] [ 2 -ne 2 ] :network_up_complete[+78] [[ epprds == epprda ]] :network_up_complete[+82] NODENAME=epprds :network_up_complete[+83] NETWORK=net_ether_01 :network_up_complete[+84] export NETWORKNAME=net_ether_01 :network_up_complete[+86] [[ -z ]] :network_up_complete[+88] EMULATE=REAL :network_up_complete[+90] set -u :network_up_complete[+96] STATUS=0 :network_up_complete[+100] odmget HACMPnode :network_up_complete[+100] grep name = :network_up_complete[+100] sort :network_up_complete[+100] uniq :network_up_complete[+100] wc -l :network_up_complete[+100] [ 2 -eq 2 ] :network_up_complete[+102] :network_up_complete[+102] odmget HACMPgroup :network_up_complete[+102] grep group = :network_up_complete[+102] awk {print $3} :network_up_complete[+102] sed s/"//g RESOURCE_GROUPS=epprd_rg :network_up_complete[+106] :network_up_complete[+106] odmget -q group=epprd_rg AND name=EXPORT_FILESYSTEM HACMPresource :network_up_complete[+106] grep value :network_up_complete[+106] awk {print $3} :network_up_complete[+106] sed s/"//g EXPORTLIST=/board_org /sapmnt/EPP :network_up_complete[+107] [ -n /board_org /sapmnt/EPP ] :network_up_complete[+109] [ REAL = EMUL ] :network_up_complete[+114] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] 
cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+189] TWIN_NAME=epprds :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+196] rpcinfo -p :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw status :cl_update_statd(0)[+207] : Get the current twin, if there is one :cl_update_statd(0)[+209] :cl_update_statd(0)[+209] nfso -H sm_gethost :cl_update_statd(0)[+209] 2>& 1 CURTWIN=epprds :cl_update_statd(0)[+210] RC=0 :cl_update_statd(0)[+212] [[ -z true ]] :cl_update_statd(0)[+212] [[ -z epprds ]] :cl_update_statd(0)[+225] : Get the interface to the twin node :cl_update_statd(0)[+227] :cl_update_statd(0)[+227] get_node_ip epprds :cl_update_statd(0)[+9] (( 1 != 1 )) :cl_update_statd(0)[+15] Twin_Name=epprds :cl_update_statd(0)[+16] NewTwin= :cl_update_statd(0)[+19] : Get the Interface details for every interface on the twin node :cl_update_statd(0)[+20] : Reject interfaces on nodes that are not public boot addresses :cl_update_statd(0)[+21] : because those are the only ones we have state information for :cl_update_statd(0)[+23] :cl_update_statd(0)[+23] cllsif -J ~ -Sw -i epprda :cl_update_statd(0)[+23] LC_ALL=C LOCAL_NETWORK_INFO=epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+24] cllsif -J ~ -Sw -i epprds :cl_update_statd(0)[+24] LC_ALL=C :cl_update_statd(0)[+25] read adapt type network net_type attrib node ip_addr skip interface skip netmask skip skip prefix ip_family :cl_update_statd(0)[+25] IFS=~ :cl_update_statd(0)[+25] [[ public != public ]] :cl_update_statd(0)[+25] [[ boot != boot ]] :cl_update_statd(0)[+33] : Find the state of this candidate :cl_update_statd(0)[+33] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+37] :cl_update_statd(0)[+37] print 61.81.244.123 :cl_update_statd(0)[+37] tr ./ xx addr=i61x81x244x123_epprds :cl_update_statd(0)[+43] eval candidate_state=${i61x81x244x123_epprds:-down} :cl_update_statd(0)[+43] candidate_state=UP :cl_update_statd(0)[+46] : If state is UP, check to see if this node can talk to it :cl_update_statd(0)[+46] [[ UP == UP ]] :cl_update_statd(0)[+50] ping -w 5 -c 1 -q 61.81.244.123 :cl_update_statd(0)[+50] 1> /dev/null :cl_update_statd(0)[+61] echo epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+61] tr \n :cl_update_statd(0)[+62] read lcl_adapt lcl_type lcl_network lcl_net_type lcl_attrib lcl_node lcl_ip_addr skip lcl_interface skip lcl_netmask skip skip lcl_prefix lcl_ip_family :cl_update_statd(0)[+62] IFS=~ :cl_update_statd(0)[+62] [[ net_ether_01 != net_ether_01 ]] :cl_update_statd(0)[+62] [[ boot != boot ]] :cl_update_statd(0)[+62] [[ public != public ]] :cl_update_statd(0)[+62] [[ AF_INET != AF_INET ]] :cl_update_statd(0)[+62] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+71] :cl_update_statd(0)[+71] print 61.81.244.134 :cl_update_statd(0)[+71] tr ./ xx addr=i61x81x244x134_epprda :cl_update_statd(0)[+77] eval lcl_candidate_state=${i61x81x244x134_epprda:-down} :cl_update_statd(0)[+77] lcl_candidate_state=UP :cl_update_statd(0)[+77] [[ UP == UP ]] :cl_update_statd(0)[+81] : epprds is on the same 
network as an interface that is up :cl_update_statd(0)[+82] : on the local node, and the attributes match. :cl_update_statd(0)[+84] NewTwin=epprds :cl_update_statd(0)[+85] break :cl_update_statd(0)[+85] [[ -n epprds ]] :cl_update_statd(0)[+91] break :cl_update_statd(0)[+91] [[ -z epprds ]] :cl_update_statd(0)[+100] echo epprds :cl_update_statd(0)[+101] return 0 NEWTWIN=epprds :cl_update_statd(0)[+227] [[ -z epprds ]] :cl_update_statd(0)[+227] [[ epprds != epprds ]] :cl_update_statd(0)[+259] : RC is actually 0 :cl_update_statd(0)[+266] return 0 :network_up_complete[+115] [ 0 -ne 0 ] :network_up_complete[+120] break :network_up_complete[+125] [[ epprds == epprda ]] :network_up_complete[+141] :network_up_complete[+141] cl_rrmethods2call net_initialization :cl_rrmethods2call[56] version=%I% :cl_rrmethods2call[84] RRMETHODS='' :cl_rrmethods2call[85] NEED_RR_ENV_VARS=no :cl_rrmethods2call[90] : The network methods are returned if the Network type is XD_data. :cl_rrmethods2call[92] clodmget -qname=net_ether_01 -f nimname -n HACMPnetwork :cl_rrmethods2call[92] RRNET=ether :cl_rrmethods2call[94] [[ ether == XD_data ]] :cl_rrmethods2call[98] return 0 METHODS= :network_up_complete[+163] :network_up_complete[+163] clodmget -n -q name=MOUNT_FILESYSTEM -f group HACMPresource CROSSMOUNTS=epprd_rg :network_up_complete[+165] [ -n epprd_rg -a epprds = epprda ] :network_up_complete[+257] [[ epprds == epprda ]] :network_up_complete[+265] exit 0 Jan 28 2023 19:50:35 EVENT COMPLETED: network_up_complete epprds net_ether_01 0 |2023-01-28T19:50:35|8606|EVENT COMPLETED: network_up_complete epprds net_ether_01 0| Jan 28 2023 19:50:37 EVENT START: rg_move_fence epprda 1 |2023-01-28T19:50:37|8602|EVENT START: rg_move_fence epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T19:50:37.524379 + echo '|2023-01-28T19:50:37.524379|INFO: rg_move_fence|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_fence[62] [[ high == high ]] :rg_move_fence[62] version=1.11 :rg_move_fence[63] NODENAME=epprda :rg_move_fence[63] export NODENAME :rg_move_fence[65] set -u :rg_move_fence[67] [ 2 != 2 ] :rg_move_fence[73] set +u :rg_move_fence[75] [[ -z TRUE ]] :rg_move_fence[80] [[ TRUE == TRUE ]] :rg_move_fence[82] LOCAL_NODENAME=epprda :rg_move_fence[83] odmget -qid=1 HACMPgroup :rg_move_fence[83] egrep 'group =' :rg_move_fence[83] awk '{print $3}' :rg_move_fence[83] eval RGNAME='"epprd_rg"' :rg_move_fence[1] RGNAME=epprd_rg +epprd_rg:rg_move_fence[84] GROUPNAME=epprd_rg +epprd_rg:rg_move_fence[85] group_state='$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[86] set +u +epprd_rg:rg_move_fence[87] eval print '$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[1] print +epprd_rg:rg_move_fence[87] RG_MOVE_ONLINE='' +epprd_rg:rg_move_fence[87] export RG_MOVE_ONLINE +epprd_rg:rg_move_fence[88] set -u +epprd_rg:rg_move_fence[89] RG_MOVE_ONLINE=TMP_ERROR +epprd_rg:rg_move_fence[91] set -a +epprd_rg:rg_move_fence[92] clsetenvgrp epprda rg_move epprd_rg '' :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_fence[92] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" 
\nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[93] RC=0 +epprd_rg:rg_move_fence[94] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_fence[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_fence[3] HOMELESS_GROUPS='' +epprd_rg:rg_move_fence[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_fence[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_fence[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_fence[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_fence[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_fence[8] SIBLING_GROUPS='' +epprd_rg:rg_move_fence[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_fence[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_fence[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[95] set +a +epprd_rg:rg_move_fence[96] [ 0 -ne 0 ] +epprd_rg:rg_move_fence[103] process_resources FENCE :rg_move_fence[3318] version=1.169 :rg_move_fence[3321] STATUS=0 :rg_move_fence[3322] sddsrv_off=FALSE :rg_move_fence[3324] true :rg_move_fence[3326] : call rgpa, and it will tell us what to do next :rg_move_fence[3328] set -a :rg_move_fence[3329] clRGPA FENCE :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa FENCE 2023-01-28T19:50:37.629205 clrgpa :clRGPA[+55] exit 0 :rg_move_fence[3329] eval JOB_TYPE=NONE :rg_move_fence[1] JOB_TYPE=NONE :rg_move_fence[3330] RC=0 :rg_move_fence[3331] set +a :rg_move_fence[3333] (( 0 != 0 )) :rg_move_fence[3342] RESOURCE_GROUPS='' :rg_move_fence[3343] GROUPNAME='' :rg_move_fence[3343] export GROUPNAME :rg_move_fence[3353] IS_SERVICE_START=1 :rg_move_fence[3354] IS_SERVICE_STOP=1 :rg_move_fence[3360] [[ NONE == RELEASE ]] :rg_move_fence[3360] [[ NONE == ONLINE ]] :rg_move_fence[3729] break :rg_move_fence[3740] : If sddsrv was turned off above, turn it back on again :rg_move_fence[3742] [[ FALSE == TRUE ]] :rg_move_fence[3747] exit 0 +epprd_rg:rg_move_fence[104] : exit status of process_resources FENCE is: 0 +epprd_rg:rg_move_fence[107] [[ TRUE == TRUE ]] +epprd_rg:rg_move_fence[109] export EVENT_TYPE +epprd_rg:rg_move_fence[110] echo ACQUIRE_PRIMARY ACQUIRE_PRIMARY +epprd_rg:rg_move_fence[111] [[ -n '' ]] +epprd_rg:rg_move_fence[141] exit 0 Jan 28 2023 19:50:37 EVENT COMPLETED: rg_move_fence epprda 1 0 |2023-01-28T19:50:37|8602|EVENT COMPLETED: rg_move_fence epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T19:50:37.722811 + echo '|2023-01-28T19:50:37.722811|INFO: rg_move_fence|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 19:50:37 EVENT START: rg_move_acquire epprda 1 |2023-01-28T19:50:37|8602|EVENT START: rg_move_acquire epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T19:50:37.927155 + echo '|2023-01-28T19:50:37.927155|INFO: 
rg_move_acquire|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_acquire[+54] [[ high == high ]] :rg_move_acquire[+54] version=1.9.1.7 :rg_move_acquire[+57] set -u :rg_move_acquire[+59] [ 2 != 2 ] :rg_move_acquire[+65] set +u :rg_move_acquire[+67] :rg_move_acquire[+67] clodmget -n -q id=1 -f group HACMPgroup RG=epprd_rg :rg_move_acquire[+68] export RG :rg_move_acquire[+70] [[ ACQUIRE_PRIMARY == ACQUIRE_PRIMARY ]] :rg_move_acquire[+75] typeset -i anhp_ret=0 :rg_move_acquire[+76] typeset -i scsi_ret=0 :rg_move_acquire[+78] clodmget -n -q policy = anhp -f value HACMPsplitmerge :rg_move_acquire[+78] typeset ANHP_ENABLED= :rg_move_acquire[+78] [[ == Yes ]] :rg_move_acquire[+87] clodmget -n -q policy = scsi -f value HACMPsplitmerge :rg_move_acquire[+87] typeset SCSIPR_ENABLED= :rg_move_acquire[+87] [[ == Yes ]] :rg_move_acquire[+106] (( 0 == 1 && 0 == 1 )) :rg_move_acquire[+109] (( 0 == 1 && 0 == 0 )) :rg_move_acquire[+112] (( 0 == 1 && 0 == 0 )) :rg_move_acquire[+118] clcallev rg_move epprda 1 ACQUIRE Jan 28 2023 19:50:37 EVENT START: rg_move epprda 1 ACQUIRE |2023-01-28T19:50:38|8602|EVENT START: rg_move epprda 1 ACQUIRE| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T19:50:38.059655 :clevlog[amlog_trace:320] echo '|2023-01-28T19:50:38.059655|INFO: rg_move|epprd_rg|epprda|1|ACQUIRE' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. :get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move[76] version=%I% :rg_move[86] STATUS=0 :rg_move[88] [[ ! 
-n '' ]] :rg_move[90] EMULATE=REAL :rg_move[96] set -u :rg_move[98] NODENAME=epprda :rg_move[98] export NODENAME :rg_move[99] RGID=1 :rg_move[100] (( 3 == 3 )) :rg_move[102] ACTION=ACQUIRE :rg_move[108] : serial number for this event is 8602 :rg_move[112] RG_UP_POSTEVENT_ON_NODE=epprda :rg_move[112] export RG_UP_POSTEVENT_ON_NODE :rg_move[116] clodmget -qid=1 -f group -n HACMPgroup :rg_move[116] eval RGNAME=epprd_rg :rg_move[1] RGNAME=epprd_rg :rg_move[118] UPDATESTATD=0 :rg_move[119] export UPDATESTATD :rg_move[123] RG_MOVE_EVENT=true :rg_move[123] export RG_MOVE_EVENT :rg_move[128] group_state='$RESGRP_epprd_rg_epprda' :rg_move[129] set +u :rg_move[130] eval print '$RESGRP_epprd_rg_epprda' :rg_move[1] print :rg_move[130] RG_MOVE_ONLINE='' :rg_move[130] export RG_MOVE_ONLINE :rg_move[131] set -u :rg_move[132] RG_MOVE_ONLINE=TMP_ERROR :rg_move[139] rm -f /tmp/.NFSSTOPPED :rg_move[140] rm -f /tmp/.RPCLOCKDSTOPPED :rg_move[147] set -a :rg_move[148] clsetenvgrp epprda rg_move epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 :rg_move[148] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[149] RC=0 :rg_move[150] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[1] FORCEDOWN_GROUPS='' :rg_move[2] RESOURCE_GROUPS='' :rg_move[3] HOMELESS_GROUPS='' :rg_move[4] HOMELESS_FOLLOWER_GROUPS='' :rg_move[5] ERRSTATE_GROUPS='' :rg_move[6] PRINCIPAL_ACTIONS='' :rg_move[7] ASSOCIATE_ACTIONS='' :rg_move[8] AUXILLIARY_ACTIONS='' :rg_move[8] SIBLING_GROUPS='' :rg_move[9] SIBLING_NODES_BY_GROUP='' :rg_move[10] SIBLING_ACQUIRING_GROUPS='' :rg_move[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' :rg_move[12] SIBLING_RELEASING_GROUPS='' :rg_move[13] SIBLING_RELEASING_NODES_BY_GROUP='' :rg_move[151] set +a :rg_move[155] (( 0 != 0 )) :rg_move[155] [[ -z epprd_rg ]] :rg_move[164] [[ -z TRUE ]] :rg_move[241] AM_SYNC_CALLED_BY=RG_MOVE :rg_move[241] export AM_SYNC_CALLED_BY :rg_move[242] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-01-28T19:50:38.182439 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=ACQUIRE RESOURCE_GROUPS='"epprd_rg"' PRINCIPAL_ACTION='"ACQUIRE"' AUXILLIARY_ACTION='"NONE"' :process_resources[1] JOB_TYPE=ACQUIRE 
:process_resources[1] RESOURCE_GROUPS=epprd_rg :process_resources[1] PRINCIPAL_ACTION=ACQUIRE :process_resources[1] AUXILLIARY_ACTION=NONE :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 )) :process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[3360] [[ ACQUIRE == ONLINE ]] +epprd_rg:process_resources[3652] set_resource_group_state ACQUIRING +epprd_rg:process_resources[set_resource_group_state:82] PS4_FUNC=set_resource_group_state +epprd_rg:process_resources[set_resource_group_state:82] typeset PS4_FUNC +epprd_rg:process_resources[set_resource_group_state:83] [[ high == high ]] +epprd_rg:process_resources[set_resource_group_state:83] set -x +epprd_rg:process_resources[set_resource_group_state:84] STAT=0 +epprd_rg:process_resources[set_resource_group_state:85] new_status=ACQUIRING +epprd_rg:process_resources[set_resource_group_state:89] export GROUPNAME +epprd_rg:process_resources[set_resource_group_state:90] [[ ACQUIRING != DOWN ]] +epprd_rg:process_resources[set_resource_group_state:92] clchdaemons -d clstrmgr_scripts -t resource_locator -n epprda -o epprd_rg -v ACQUIRING +epprd_rg:process_resources[set_resource_group_state:100] : Resource Manager Updates +epprd_rg:process_resources[set_resource_group_state:105] amlog_trace '' 'acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:318] clcycle clavailability.log +epprd_rg:process_resources[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:process_resources[amlog_trace:319] cltime +epprd_rg:process_resources[amlog_trace:319] DATE=2023-01-28T19:50:38.217018 +epprd_rg:process_resources[amlog_trace:320] echo '|2023-01-28T19:50:38.217018|INFO: acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:process_resources[set_resource_group_state:106] cl_RMupdate acquiring epprd_rg process_resources 2023-01-28T19:50:38.241145 2023-01-28T19:50:38.245607 +epprd_rg:process_resources[set_resource_group_state:153] return 0 +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:50:38.257809 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=WPAR ACTION=ACQUIRE RESOURCE_GROUPS='"epprd_rg' '"' +epprd_rg:process_resources[1] JOB_TYPE=WPAR +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ WPAR == RELEASE ]] +epprd_rg:process_resources[3360] [[ WPAR == ONLINE ]] 
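The amlog_trace calls that bracket every state change in this log all append one pipe-delimited record to the availability log. Reconstructed from the trace lines above (the real function ships with the event utilities), it amounts to:

    # Sketch: one record per transition in /var/hacmp/availability/clavailability.log
    amlog_trace()
    {
        clcycle clavailability.log > /dev/null 2>&1   # rotate the log if due
        DATE=$(cltime)
        echo "|$DATE|INFO: $2" >> /var/hacmp/availability/clavailability.log
    }
    # e.g. amlog_trace '' 'acquire|epprd_rg|epprda'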
+epprd_rg:process_resources[3492] process_wpars ACQUIRE +epprd_rg:process_resources[process_wpars:3265] PS4_FUNC=process_wpars +epprd_rg:process_resources[process_wpars:3265] typeset PS4_FUNC +epprd_rg:process_resources[process_wpars:3266] [[ high == high ]] +epprd_rg:process_resources[process_wpars:3266] set -x +epprd_rg:process_resources[process_wpars:3267] STAT=0 +epprd_rg:process_resources[process_wpars:3268] action=ACQUIRE +epprd_rg:process_resources[process_wpars:3268] typeset action +epprd_rg:process_resources[process_wpars:3272] export GROUPNAME +epprd_rg:process_resources[process_wpars:3275] clstart_wpar +epprd_rg:clstart_wpar[180] version=1.12.1.1 +epprd_rg:clstart_wpar[184] [[ rg_move == reconfig_resource_acquire ]] +epprd_rg:clstart_wpar[184] [[ ACQUIRE_PRIMARY == reconfig_resource_acquire ]] +epprd_rg:clstart_wpar[193] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clstart_wpar[193] [[ -z '' ]] +epprd_rg:clstart_wpar[193] exit 0 +epprd_rg:process_resources[process_wpars:3276] RC=0 +epprd_rg:process_resources[process_wpars:3285] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[process_wpars:3294] return 0 +epprd_rg:process_resources[3493] RC=0 +epprd_rg:process_resources[3495] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:50:38.288567 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=SERVICE_LABELS ACTION=ACQUIRE IP_LABELS='"epprd"' RESOURCE_GROUPS='"epprd_rg' '"' COMMUNICATION_LINKS='""' +epprd_rg:process_resources[1] JOB_TYPE=SERVICE_LABELS +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] IP_LABELS=epprd +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] COMMUNICATION_LINKS='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ SERVICE_LABELS == RELEASE ]] +epprd_rg:process_resources[3360] [[ SERVICE_LABELS == ONLINE ]] +epprd_rg:process_resources[3407] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[3409] acquire_service_labels +epprd_rg:process_resources[acquire_service_labels:3083] PS4_FUNC=acquire_service_labels +epprd_rg:process_resources[acquire_service_labels:3083] typeset PS4_FUNC +epprd_rg:process_resources[acquire_service_labels:3084] [[ high == high ]] +epprd_rg:process_resources[acquire_service_labels:3084] set -x +epprd_rg:process_resources[acquire_service_labels:3085] STAT=0 +epprd_rg:process_resources[acquire_service_labels:3086] clcallev acquire_service_addr Jan 28 2023 19:50:38 EVENT START: acquire_service_addr |2023-01-28T19:50:38|8602|EVENT START: acquire_service_addr | +epprd_rg:acquire_service_addr[416] version=1.74.1.5 +epprd_rg:acquire_service_addr[423] [[ SERVICE_LABELS != 0 ]] +epprd_rg:acquire_service_addr[423] [[ SERVICE_LABELS != GROUP ]] 
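acquire_service_addr, just started above, consumes the IP_LABELS list one label at a time with the get_list_head/get_list_tail helpers (visible in the trace that follows). A minimal sketch of that list-walking idiom, assuming whitespace-separated labels:

    # Sketch: peel labels off IP_LABELS until the list is empty.
    IP_LABELS="epprd"                                  # from the SERVICE_LABELS job
    while [[ -n $IP_LABELS ]]
    do
        get_list_head $IP_LABELS | read SERVICELABEL   # first element
        get_list_tail $IP_LABELS | read IP_LABELS      # remainder of the list
        print -- "acquiring service label: $SERVICELABEL"
    done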
+epprd_rg:acquire_service_addr[424] PROC_RES=true +epprd_rg:acquire_service_addr[440] saveNSORDER=UNDEFINED +epprd_rg:acquire_service_addr[441] NSORDER=local +epprd_rg:acquire_service_addr[442] export NSORDER +epprd_rg:acquire_service_addr[445] cl_RMupdate resource_acquiring All_service_addrs acquire_service_addr 2023-01-28T19:50:38.370739 2023-01-28T19:50:38.375017 +epprd_rg:acquire_service_addr[452] export GROUPNAME +epprd_rg:acquire_service_addr[458] [[ true == true ]] +epprd_rg:acquire_service_addr[459] get_list_head epprd +epprd_rg:acquire_service_addr[459] read SERVICELABELS +epprd_rg:acquire_service_addr[460] get_list_tail epprd +epprd_rg:acquire_service_addr[460] read IP_LABELS +epprd_rg:acquire_service_addr[471] clgetif -a epprd +epprd_rg:acquire_service_addr[471] 2> /dev/null +epprd_rg:acquire_service_addr[472] (( 3 != 0 )) +epprd_rg:acquire_service_addr[477] cllsif -J '~' -Sn epprd +epprd_rg:acquire_service_addr[477] cut -d~ -f3 +epprd_rg:acquire_service_addr[477] uniq +epprd_rg:acquire_service_addr[477] NETWORK=net_ether_01 +epprd_rg:acquire_service_addr[478] cllsif -J '~' -Si epprda +epprd_rg:acquire_service_addr[478] awk -F~ -v NET=net_ether_01 '{if ($2 == "boot" && $3 == NET) print $1}' +epprd_rg:acquire_service_addr[478] sort +epprd_rg:acquire_service_addr[478] boot_list=epprda +epprd_rg:acquire_service_addr[480] [[ -z epprda ]] +epprd_rg:acquire_service_addr[492] best_boot_addr net_ether_01 epprda +epprd_rg:acquire_service_addr[best_boot_addr:106] NETWORK=net_ether_01 +epprd_rg:acquire_service_addr[best_boot_addr:106] typeset NETWORK +epprd_rg:acquire_service_addr[best_boot_addr:107] shift +epprd_rg:acquire_service_addr[best_boot_addr:108] candidate_boots=epprda +epprd_rg:acquire_service_addr[best_boot_addr:108] typeset candidate_boots +epprd_rg:acquire_service_addr[best_boot_addr:112] echo epprda +epprd_rg:acquire_service_addr[best_boot_addr:112] tr ' ' '\n' +epprd_rg:acquire_service_addr[best_boot_addr:112] wc -l +epprd_rg:acquire_service_addr[best_boot_addr:112] num_candidates=' 1' +epprd_rg:acquire_service_addr[best_boot_addr:112] typeset -li num_candidates +epprd_rg:acquire_service_addr[best_boot_addr:113] (( 1 == 1 )) +epprd_rg:acquire_service_addr[best_boot_addr:114] echo epprda +epprd_rg:acquire_service_addr[best_boot_addr:115] return +epprd_rg:acquire_service_addr[492] boot_addr=epprda +epprd_rg:acquire_service_addr[493] (( 0 != 0 )) +epprd_rg:acquire_service_addr[505] clgetif -a epprda +epprd_rg:acquire_service_addr[505] 2> /dev/null +epprd_rg:acquire_service_addr[505] cut -f1 +epprd_rg:acquire_service_addr[505] INTERFACE='en0 ' +epprd_rg:acquire_service_addr[507] cllsif -J '~' -Sn epprda +epprd_rg:acquire_service_addr[507] cut -f7,9 -d~ +epprd_rg:acquire_service_addr[508] read boot_dot_addr INTERFACE +epprd_rg:acquire_service_addr[508] IFS='~' +epprd_rg:acquire_service_addr[510] [[ -z en0 ]] +epprd_rg:acquire_service_addr[527] cllsif -J '~' -Sn epprd +epprd_rg:acquire_service_addr[527] cut -f7,11,15 -d~ +epprd_rg:acquire_service_addr[527] uniq +epprd_rg:acquire_service_addr[528] read service_dot_addr NETMASK INET_FAMILY +epprd_rg:acquire_service_addr[528] IFS='~' +epprd_rg:acquire_service_addr[530] [[ AF_INET == AF_INET6 ]] +epprd_rg:acquire_service_addr[534] cl_swap_IP_address rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[462] version=1.9.14.8 +epprd_rg:cl_swap_IP_address[464] cl_get_path -S +epprd_rg:cl_swap_IP_address[464] OP_SEP='~' +epprd_rg:cl_swap_IP_address[465] LC_ALL=C +epprd_rg:cl_swap_IP_address[465] 
export LC_ALL +epprd_rg:cl_swap_IP_address[466] RESTORE_ROUTES=/usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[468] cl_echo 33 'Starting execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0' /usr/es/sbin/cluster/events/utils/cl_swap_IP_address 'rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0' Jan 28 2023 19:50:38Starting execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0+epprd_rg:cl_swap_IP_address[470] typeset -i oslevel +epprd_rg:cl_swap_IP_address[471] /usr/bin/oslevel -r +epprd_rg:cl_swap_IP_address[471] /usr/bin/sed s/-//g +epprd_rg:cl_swap_IP_address[471] oslevel=720005 +epprd_rg:cl_swap_IP_address[476] [[ 6 == 6 ]] +epprd_rg:cl_swap_IP_address[477] [[ 6 == 7 ]] +epprd_rg:cl_swap_IP_address[484] no -a +epprd_rg:cl_swap_IP_address[484] grep ipignoreredirects +epprd_rg:cl_swap_IP_address[484] awk '{ print $3 }' +epprd_rg:cl_swap_IP_address[484] PRIOR_IPIGNORE_REDIRECTS_VALUE=0 +epprd_rg:cl_swap_IP_address[485] /usr/sbin/no -o ipignoreredirects=1 Setting ipignoreredirects to 1 +epprd_rg:cl_swap_IP_address[490] PROC_RES=false +epprd_rg:cl_swap_IP_address[491] [[ SERVICE_LABELS != 0 ]] +epprd_rg:cl_swap_IP_address[491] [[ SERVICE_LABELS != GROUP ]] +epprd_rg:cl_swap_IP_address[492] PROC_RES=true +epprd_rg:cl_swap_IP_address[495] set -u +epprd_rg:cl_swap_IP_address[497] RC=0 +epprd_rg:cl_swap_IP_address[504] netstat -in Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll en0 1500 link#2 fa.e6.13.4e.a9.20 184323380 0 61166845 0 0 en0 1500 61.81.244 61.81.244.134 184323380 0 61166845 0 0 lo0 16896 link#1 35610558 0 35610558 0 0 lo0 16896 127 127.0.0.1 35610558 0 35610558 0 0 lo0 16896 ::1%1 35610558 0 35610558 0 0 +epprd_rg:cl_swap_IP_address[505] netstat -rnC Routing tables Destination Gateway Flags Wt Policy If Cost Config_Cost Route tree for Protocol Family 2 (Internet): default 61.81.244.1 UG 1 - en0 0 0 61.81.244.0 61.81.244.134 UHSb 1 - en0 0 0 => 61.81.244/24 61.81.244.134 U 1 - en0 0 0 61.81.244.134 127.0.0.1 UGHS 1 - lo0 0 0 61.81.244.255 61.81.244.134 UHSb 1 - en0 0 0 127/8 127.0.0.1 U 1 - lo0 0 0 Route tree for Protocol Family 24 (Internet v6): ::1%1 ::1%1 UH 1 - lo0 0 0 +epprd_rg:cl_swap_IP_address[506] CASC_OR_ROT=rotating +epprd_rg:cl_swap_IP_address[507] ACQ_OR_RLSE=acquire +epprd_rg:cl_swap_IP_address[508] IF=en0 +epprd_rg:cl_swap_IP_address[509] ADDR=61.81.244.156 +epprd_rg:cl_swap_IP_address[510] OLD_ADDR=61.81.244.134 +epprd_rg:cl_swap_IP_address[511] NETMASK=255.255.255.0 +epprd_rg:cl_swap_IP_address[514] [[ rotating == cascading ]] +epprd_rg:cl_swap_IP_address[525] cllsif -J '~' -Sw -n 61.81.244.156 +epprd_rg:cl_swap_IP_address[525] cut -f3 -d~ +epprd_rg:cl_swap_IP_address[525] NET=net_ether_01 +epprd_rg:cl_swap_IP_address[528] clodmget -qidentifier=61.81.244.156 -f max_aliases -n HACMPadapter +epprd_rg:cl_swap_IP_address[528] ALIAS_FIRST=0 +epprd_rg:cl_swap_IP_address[529] ifconfig en0 +epprd_rg:cl_swap_IP_address[529] LC_ALL=C +epprd_rg:cl_swap_IP_address[529] grep -c -w inet +epprd_rg:cl_swap_IP_address[529] NUM_ADDRS=1 +epprd_rg:cl_swap_IP_address[530] [[ acquire == acquire ]] +epprd_rg:cl_swap_IP_address[533] amlog_trace '' 'Aliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_swap_IP_address[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_swap_IP_address[amlog_trace:319] cltime 
+epprd_rg:cl_swap_IP_address[amlog_trace:319] DATE=2023-01-28T19:50:38.645861 +epprd_rg:cl_swap_IP_address[amlog_trace:320] echo '|2023-01-28T19:50:38.645861|INFO: Aliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_swap_IP_address[535] cl_echo 7310 'cl_swap_IP_address: Configuring network interface en0 with aliased IP address 61.81.244.156' cl_swap_IP_address en0 61.81.244.156 Jan 28 2023 19:50:38cl_swap_IP_address: Configuring network interface en0 with aliased IP address 61.81.244.156+epprd_rg:cl_swap_IP_address[546] (( 1 > 1 )) +epprd_rg:cl_swap_IP_address[550] clifconfig en0 alias 61.81.244.156 netmask 255.255.255.0 firstalias +epprd_rg:clifconfig[117] version=1.9 +epprd_rg:clifconfig[121] set -A args en0 alias 61.81.244.156 netmask 255.255.255.0 firstalias +epprd_rg:clifconfig[124] interface=en0 +epprd_rg:clifconfig[125] shift +epprd_rg:clifconfig[127] [[ -n alias ]] +epprd_rg:clifconfig[129] alias_val=1 +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n 61.81.244.156 ]] +epprd_rg:clifconfig[147] params=' address=61.81.244.156' +epprd_rg:clifconfig[147] addr=61.81.244.156 +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n netmask ]] +epprd_rg:clifconfig[149] params=' address=61.81.244.156 netmask=255.255.255.0' +epprd_rg:clifconfig[149] shift +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n firstalias ]] +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n '' ]] +epprd_rg:clifconfig[174] [[ -n 1 ]] +epprd_rg:clifconfig[174] [[ -n epprd_rg ]] +epprd_rg:clifconfig[175] clwparname epprd_rg +epprd_rg:clwparname[38] version=1.3.1.1 +epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clwparname[44] [[ -z '' ]] +epprd_rg:clwparname[44] exit 0 +epprd_rg:clifconfig[175] WPARNAME='' +epprd_rg:clifconfig[176] (( 0 == 0 )) +epprd_rg:clifconfig[176] [[ -n '' ]] +epprd_rg:clifconfig[218] belongs_to_an_active_wpar 61.81.244.156 +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]] +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1 +epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast +epprd_rg:clifconfig[218] IFS='~' +epprd_rg:clifconfig[219] rc=1 +epprd_rg:clifconfig[221] [[ 1 == 0 ]] +epprd_rg:clifconfig[275] ifconfig en0 alias 61.81.244.156 netmask 255.255.255.0 firstalias +epprd_rg:cl_swap_IP_address[584] hats_adapter_notify en0 -e 61.81.244.156 alias 2023-01-28T19:50:38.699376 hats_adapter_notify 2023-01-28T19:50:38.703131 hats_adapter_notify +epprd_rg:cl_swap_IP_address[587] check_alias_status en0 61.81.244.156 acquire +epprd_rg:cl_swap_IP_address[check_alias_status:108] CH_INTERFACE=en0 +epprd_rg:cl_swap_IP_address[check_alias_status:109] CH_ADDRESS=61.81.244.156 +epprd_rg:cl_swap_IP_address[check_alias_status:110] CH_ACQ_OR_RLSE=acquire +epprd_rg:cl_swap_IP_address[check_alias_status:118] IF_IB=en0 +epprd_rg:cl_swap_IP_address[check_alias_status:120] echo en0 +epprd_rg:cl_swap_IP_address[check_alias_status:120] awk '{print index($0, "ib")}' +epprd_rg:cl_swap_IP_address[check_alias_status:120] IS_IB=0 +epprd_rg:cl_swap_IP_address[check_alias_status:122] [[ 0 != 1 ]] +epprd_rg:cl_swap_IP_address[check_alias_status:124] clifconfig en0 +epprd_rg:cl_swap_IP_address[check_alias_status:124] awk '{print $2}' +epprd_rg:cl_swap_IP_address[check_alias_status:124] fgrep -w 
61.81.244.156 +epprd_rg:clifconfig[117] version=1.9 +epprd_rg:clifconfig[121] set -A args en0 +epprd_rg:clifconfig[124] interface=en0 +epprd_rg:clifconfig[125] shift +epprd_rg:clifconfig[127] [[ -n '' ]] +epprd_rg:clifconfig[174] [[ -n '' ]] +epprd_rg:clifconfig[218] belongs_to_an_active_wpar +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]] +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1 +epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast +epprd_rg:clifconfig[218] IFS='~' +epprd_rg:clifconfig[219] rc=1 +epprd_rg:clifconfig[221] [[ 1 == 0 ]] +epprd_rg:clifconfig[275] ifconfig en0 +epprd_rg:cl_swap_IP_address[check_alias_status:124] ADDR=61.81.244.156 +epprd_rg:cl_swap_IP_address[check_alias_status:129] [ acquire = acquire ] +epprd_rg:cl_swap_IP_address[check_alias_status:133] [[ 61.81.244.156 != 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[check_alias_status:144] return 0 +epprd_rg:cl_swap_IP_address[588] RC=0 +epprd_rg:cl_swap_IP_address[590] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[594] amlog_trace '' 'Aliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_swap_IP_address[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_swap_IP_address[amlog_trace:319] cltime +epprd_rg:cl_swap_IP_address[amlog_trace:319] DATE=2023-01-28T19:50:38.769737 +epprd_rg:cl_swap_IP_address[amlog_trace:320] echo '|2023-01-28T19:50:38.769737|INFO: Aliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_swap_IP_address[701] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[714] flush_arp +epprd_rg:cl_swap_IP_address[flush_arp:49] arp -an +epprd_rg:cl_swap_IP_address[flush_arp:49] grep '\?' 
+epprd_rg:cl_swap_IP_address[flush_arp:49] tr -d '()' +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.27 61.81.244.27 (61.81.244.27) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.218 61.81.244.218 (61.81.244.218) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.220 61.81.244.220 (61.81.244.220) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.221 61.81.244.221 (61.81.244.221) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.224 61.81.244.224 (61.81.244.224) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.239 61.81.244.239 (61.81.244.239) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.123 61.81.244.123 (61.81.244.123) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.145 61.81.244.145 (61.81.244.145) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.146 61.81.244.146 (61.81.244.146) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.1 61.81.244.1 (61.81.244.1) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.156 61.81.244.156 (61.81.244.156) deleted +epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other +epprd_rg:cl_swap_IP_address[flush_arp:52] return 0 +epprd_rg:cl_swap_IP_address[716] netstat -in Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll en0 1500 link#2 fa.e6.13.4e.a9.20 184323474 0 61166974 0 0 en0 1500 61.81.244 61.81.244.156 184323474 0 61166974 0 0 en0 1500 61.81.244 61.81.244.134 184323474 0 61166974 0 0 lo0 16896 link#1 35610562 0 35610562 0 0 lo0 16896 127 127.0.0.1 35610562 0 35610562 0 0 lo0 16896 ::1%1 35610562 0 35610562 0 0 +epprd_rg:cl_swap_IP_address[717] netstat -rnC Routing tables Destination Gateway Flags Wt Policy If Cost Config_Cost Route tree for Protocol Family 2 (Internet): default 61.81.244.1 UG 1 - en0 0 0 61.81.244.0 61.81.244.156 UHSb 1 - en0 0 0 => 61.81.244/24 61.81.244.156 U 1 - en0 0 0 61.81.244.134 127.0.0.1 UGHS 1 - lo0 0 0 61.81.244.156 127.0.0.1 UGHS 1 - lo0 0 0 61.81.244.255 61.81.244.156 UHSb 1 - en0 0 0 127/8 127.0.0.1 U 1 - lo0 0 0 Route tree for Protocol Family 24 (Internet v6): ::1%1 ::1%1 UH 1 - lo0 0 0 +epprd_rg:cl_swap_IP_address[989] no -o ipignoreredirects=0 Setting ipignoreredirects to 0 +epprd_rg:cl_swap_IP_address[992] cl_echo 32 'Completed execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0. Exit status = 0' /usr/es/sbin/cluster/events/utils/cl_swap_IP_address 'rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0' 0 Jan 28 2023 19:50:38Completed execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0. 
Exit status = 0+epprd_rg:cl_swap_IP_address[994] date Sat Jan 28 19:50:38 KORST 2023 +epprd_rg:cl_swap_IP_address[996] exit 0 +epprd_rg:acquire_service_addr[537] RC=0 +epprd_rg:acquire_service_addr[539] (( 0 != 0 )) +epprd_rg:acquire_service_addr[549] [[ true == false ]] +epprd_rg:acquire_service_addr[560] cl_RMupdate resource_up All_nonerror_service_addrs acquire_service_addr 2023-01-28T19:50:38.858259 2023-01-28T19:50:38.862566 +epprd_rg:acquire_service_addr[565] [[ UNDEFINED != UNDEFINED ]] +epprd_rg:acquire_service_addr[568] NSORDER='' +epprd_rg:acquire_service_addr[568] export NSORDER +epprd_rg:acquire_service_addr[571] [[ true == false ]] +epprd_rg:acquire_service_addr[579] exit 0 Jan 28 2023 19:50:38 EVENT COMPLETED: acquire_service_addr 0 |2023-01-28T19:50:38|8602|EVENT COMPLETED: acquire_service_addr 0| +epprd_rg:process_resources[acquire_service_labels:3087] RC=0 +epprd_rg:process_resources[acquire_service_labels:3089] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[acquire_service_labels:3104] (( 0 != 0 )) +epprd_rg:process_resources[acquire_service_labels:3110] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. +epprd_rg:process_resources[acquire_service_labels:3112] return 0 +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:50:38.941845 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=DISKS ACTION=ACQUIRE HDISKS='"hdisk2,hdisk3,hdisk4,hdisk5,hdisk6,hdisk7,hdisk8"' RESOURCE_GROUPS='"epprd_rg' '"' VOLUME_GROUPS='"datavg,datavg,datavg,datavg,datavg,datavg,datavg"' +epprd_rg:process_resources[1] JOB_TYPE=DISKS +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] HDISKS=hdisk2,hdisk3,hdisk4,hdisk5,hdisk6,hdisk7,hdisk8 +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] VOLUME_GROUPS=datavg,datavg,datavg,datavg,datavg,datavg,datavg +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ DISKS == RELEASE ]] +epprd_rg:process_resources[3360] [[ DISKS == ONLINE ]] +epprd_rg:process_resources[3439] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[3441] FAILED_RR_RGS='' +epprd_rg:process_resources[3442] get_disks_main +epprd_rg:process_resources[get_disks_main:981] PS4_FUNC=get_disks_main +epprd_rg:process_resources[get_disks_main:981] typeset PS4_FUNC +epprd_rg:process_resources[get_disks_main:982] [[ high == high ]] +epprd_rg:process_resources[get_disks_main:982] set -x +epprd_rg:process_resources[get_disks_main:983] SKIPBRKRES=0 +epprd_rg:process_resources[get_disks_main:983] typeset -li SKIPBRKRES +epprd_rg:process_resources[get_disks_main:984] STAT=0 +epprd_rg:process_resources[get_disks_main:985] FAILURE_IN_METHOD=0 +epprd_rg:process_resources[get_disks_main:985] typeset -li FAILURE_IN_METHOD 
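The loop driving this stretch of the trace is process_resources repeatedly asking clRGPA what to do next: clRGPA prints shell variable assignments (JOB_TYPE, ACTION, disk and VG lists), the caller evals them with allexport set, then branches on JOB_TYPE. A minimal sketch of that dispatch pattern, assuming some terminating job type is eventually returned (the terminator itself is not visible in this part of the trace):

    while true
    do
        set -a                   # allexport: everything the eval assigns gets exported
        eval $(clRGPA)           # e.g. JOB_TYPE=DISKS ACTION=ACQUIRE HDISKS="..."
        RC=$?
        set +a
        (( RC != 0 )) && break   # bail out if the query itself failed
        case $JOB_TYPE in
            DISKS) get_disks_main ;;                      # seen above
            VGS)   process_volume_groups_main $ACTION ;;  # seen below
            *)     break ;;      # assumed terminator; not shown in this trace
        esac
    done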
+epprd_rg:process_resources[get_disks_main:986] LIST_OF_FAILED_RGS='' +epprd_rg:process_resources[get_disks_main:989] : Below are the list of resources as generated by clrgpa +epprd_rg:process_resources[get_disks_main:991] RG_LIST=epprd_rg +epprd_rg:process_resources[get_disks_main:992] RDISK_LIST='' +epprd_rg:process_resources[get_disks_main:993] DISK_LIST=hdisk2,hdisk3,hdisk4,hdisk5,hdisk6,hdisk7,hdisk8 +epprd_rg:process_resources[get_disks_main:994] VG_LIST=datavg,datavg,datavg,datavg,datavg,datavg,datavg +epprd_rg:process_resources[get_disks_main:997] : Resource groups are processed individually. This is required because +epprd_rg:process_resources[get_disks_main:998] : the replication mechanism may differ between resource groups. +epprd_rg:process_resources[get_disks_main:1002] getReplicatedResources epprd_rg +epprd_rg:process_resources[getReplicatedResources:699] PS4_FUNC=getReplicatedResources +epprd_rg:process_resources[getReplicatedResources:699] typeset PS4_FUNC +epprd_rg:process_resources[getReplicatedResources:700] [[ high == high ]] +epprd_rg:process_resources[getReplicatedResources:700] set -x +epprd_rg:process_resources[getReplicatedResources:702] RV=false +epprd_rg:process_resources[getReplicatedResources:704] clodmget -n -f type HACMPrresmethods +epprd_rg:process_resources[getReplicatedResources:704] [[ -n 9 ]] +epprd_rg:process_resources[getReplicatedResources:707] : Replicated resource methods are defined, check for resources +epprd_rg:process_resources[getReplicatedResources:709] clodmget -q $'name like \'*_REP_RESOURCE\' AND group=epprd_rg' -f value -n HACMPresource +epprd_rg:process_resources[getReplicatedResources:709] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:718] : Verify if any backup profiles are configured and trigger cbm utilities based on that +epprd_rg:process_resources[getReplicatedResources:720] clodmget -q name=BACKUP_ENABLED -f value HACMPresource +epprd_rg:process_resources[getReplicatedResources:720] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:739] echo false +epprd_rg:process_resources[get_disks_main:1002] REPLICATED_RESOURCES=false +epprd_rg:process_resources[get_disks_main:1005] : Break out the resources for resource group epprd_rg +epprd_rg:process_resources[get_disks_main:1007] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[get_disks_main:1008] VOLUME_GROUPS='' +epprd_rg:process_resources[get_disks_main:1009] HDISKS='' +epprd_rg:process_resources[get_disks_main:1010] RHDISKS='' +epprd_rg:process_resources[get_disks_main:1011] RDISK_LIST='' +epprd_rg:process_resources[get_disks_main:1014] : Get the volume groups in resource group epprd_rg +epprd_rg:process_resources[get_disks_main:1016] print datavg,datavg,datavg,datavg,datavg,datavg,datavg +epprd_rg:process_resources[get_disks_main:1016] read VOLUME_GROUPS VG_LIST +epprd_rg:process_resources[get_disks_main:1016] IFS=: +epprd_rg:process_resources[get_disks_main:1018] : Removing duplicate entries in VG list. 
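getReplicatedResources reduces to two ODM lookups with clodmget, both visible above: replication is in play only if replicated resource methods are defined in HACMPrresmethods and the resource group actually owns a *_REP_RESOURCE entry in HACMPresource. A sketch of that decision, with the function printing true or false for the caller to capture (the BACKUP_ENABLED branch from the trace is left out for brevity):

    getReplicatedResources()
    {
        typeset rg=$1
        typeset RV=false
        if [[ -n $(clodmget -n -f type HACMPrresmethods) ]]
        then
            # Methods are defined; does this group reference any of them?
            if [[ -n $(clodmget -q "name like '*_REP_RESOURCE' AND group=$rg" \
                        -f value -n HACMPresource) ]]
            then
                RV=true
            fi
        fi
        echo $RV
    }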
+epprd_rg:process_resources[get_disks_main:1020] echo datavg,datavg,datavg,datavg,datavg,datavg,datavg +epprd_rg:process_resources[get_disks_main:1020] sort -u +epprd_rg:process_resources[get_disks_main:1020] tr , '\n' +epprd_rg:process_resources[get_disks_main:1020] xargs +epprd_rg:process_resources[get_disks_main:1020] VOLUME_GROUPS=datavg +epprd_rg:process_resources[get_disks_main:1022] : Get the disks corresponding to these volume groups +epprd_rg:process_resources[get_disks_main:1024] print hdisk2,hdisk3,hdisk4,hdisk5,hdisk6,hdisk7,hdisk8 +epprd_rg:process_resources[get_disks_main:1024] read HDISKS DISK_LIST +epprd_rg:process_resources[get_disks_main:1024] IFS=: +epprd_rg:process_resources[get_disks_main:1025] HDISKS='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8' +epprd_rg:process_resources[get_disks_main:1031] : Pick up any raw disks not returned by clrgpa +epprd_rg:process_resources[get_disks_main:1033] clodmget -q group='epprd_rg AND name=RAW_DISK' HACMPresource +epprd_rg:process_resources[get_disks_main:1033] [[ -n '' ]] +epprd_rg:process_resources[get_disks_main:1042] : Get any raw disks in resource group epprd_rg +epprd_rg:process_resources[get_disks_main:1045] print +epprd_rg:process_resources[get_disks_main:1045] read RHDISKS RDISK_LIST +epprd_rg:process_resources[get_disks_main:1045] IFS=: +epprd_rg:process_resources[get_disks_main:1046] RHDISKS='' +epprd_rg:process_resources[get_disks_main:1047] print datavg +epprd_rg:process_resources[get_disks_main:1047] read VOLUME_GROUPS +epprd_rg:process_resources[get_disks_main:1051] : At this point, the global variables below should be set to +epprd_rg:process_resources[get_disks_main:1052] : the values associated with resource group epprd_rg +epprd_rg:process_resources[get_disks_main:1054] export RESOURCE_GROUPS +epprd_rg:process_resources[get_disks_main:1055] export VOLUME_GROUPS +epprd_rg:process_resources[get_disks_main:1056] export HDISKS +epprd_rg:process_resources[get_disks_main:1057] export RHDISKS +epprd_rg:process_resources[get_disks_main:1059] [[ false == true ]] +epprd_rg:process_resources[get_disks_main:1182] get_disks +epprd_rg:process_resources[get_disks:1198] PS4_FUNC=get_disks +epprd_rg:process_resources[get_disks:1198] typeset PS4_FUNC +epprd_rg:process_resources[get_disks:1199] [[ high == high ]] +epprd_rg:process_resources[get_disks:1199] set -x +epprd_rg:process_resources[get_disks:1201] STAT=0 +epprd_rg:process_resources[get_disks:1204] : Most volume groups are Enhanced Concurrent Mode, and it should +epprd_rg:process_resources[get_disks:1205] : not be necessary to break reserves. If all the volume groups +epprd_rg:process_resources[get_disks:1206] : are ECM, we should be able to skip breaking reserves. If it +epprd_rg:process_resources[get_disks:1207] : turns out that there is a reserve on a disk in an ECM volume +epprd_rg:process_resources[get_disks:1208] : group, that will be handled by cl_pvo making an explicit call +epprd_rg:process_resources[get_disks:1209] : to cl_disk_available. 
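The duplicate-removal idiom applied to the volume group list is worth calling out, since clrgpa reports one VG entry per disk in this trace (seven datavg entries for seven hdisks): split the comma-separated list on commas, sort unique, and let xargs rejoin the survivors on single spaces.

    VG_LIST=datavg,datavg,datavg,datavg,datavg,datavg,datavg
    VOLUME_GROUPS=$(echo $VG_LIST | tr ',' '\n' | sort -u | xargs)
    echo $VOLUME_GROUPS    # -> datavg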
+epprd_rg:process_resources[get_disks:1213] all_ecm=TRUE +epprd_rg:process_resources[get_disks:1214] IFS=: +epprd_rg:process_resources[get_disks:1214] set -- datavg +epprd_rg:process_resources[get_disks:1214] print datavg +epprd_rg:process_resources[get_disks:1216] print datavg +epprd_rg:process_resources[get_disks:1216] tr , '\n' +epprd_rg:process_resources[get_disks:1216] sort -u +epprd_rg:process_resources[get_disks:1218] clodmget -q 'name = datavg and attribute = conc_capable' -f value -n CuAt +epprd_rg:process_resources[get_disks:1218] [[ y != y ]] +epprd_rg:process_resources[get_disks:1224] [[ TRUE == FALSE ]] +epprd_rg:process_resources[get_disks:1226] [[ TRUE == TRUE ]] +epprd_rg:process_resources[get_disks:1226] return 0 +epprd_rg:process_resources[get_disks_main:1183] STAT=0 +epprd_rg:process_resources[get_disks_main:1186] return 0 +epprd_rg:process_resources[3443] echo +epprd_rg:process_resources[3443] tr ' ' '\n' +epprd_rg:process_resources[3443] FAILED_RR_RGS='' +epprd_rg:process_resources[3444] [[ -n '' ]] +epprd_rg:process_resources[3450] clodmget -n -q policy=scsi -f value HACMPsplitmerge +epprd_rg:process_resources[3450] SCSIPR_ENABLED='' +epprd_rg:process_resources[3450] typeset SCSIPR_ENABLED +epprd_rg:process_resources[3451] [[ '' == Yes ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:50:39.029857 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=VGS ACTION=ACQUIRE CONCURRENT_VOLUME_GROUP='""' VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' EXPORT_FILESYSTEM='""' +epprd_rg:process_resources[1] JOB_TYPE=VGS +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] CONCURRENT_VOLUME_GROUP='' +epprd_rg:process_resources[1] VOLUME_GROUPS=datavg +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] EXPORT_FILESYSTEM='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ VGS == RELEASE ]] +epprd_rg:process_resources[3360] [[ VGS == ONLINE ]] +epprd_rg:process_resources[3571] process_volume_groups_main ACQUIRE +epprd_rg:process_resources[process_volume_groups_main:2293] PS4_FUNC=process_volume_groups_main +epprd_rg:process_resources[process_volume_groups_main:2293] typeset PS4_FUNC +epprd_rg:process_resources[process_volume_groups_main:2294] [[ high == high ]] +epprd_rg:process_resources[process_volume_groups_main:2294] set -x +epprd_rg:process_resources[process_volume_groups_main:2295] DEF_VARYON_ACTION=0 +epprd_rg:process_resources[process_volume_groups_main:2295] typeset -li DEF_VARYON_ACTION +epprd_rg:process_resources[process_volume_groups_main:2296] FAILURE_IN_METHOD=0 +epprd_rg:process_resources[process_volume_groups_main:2296] typeset -li FAILURE_IN_METHOD +epprd_rg:process_resources[process_volume_groups_main:2297] 
ACTION=ACQUIRE +epprd_rg:process_resources[process_volume_groups_main:2297] typeset ACTION +epprd_rg:process_resources[process_volume_groups_main:2298] STAT=0 +epprd_rg:process_resources[process_volume_groups_main:2299] VG_LIST=datavg +epprd_rg:process_resources[process_volume_groups_main:2300] RG_LIST=epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2304] getReplicatedResources epprd_rg +epprd_rg:process_resources[getReplicatedResources:699] PS4_FUNC=getReplicatedResources +epprd_rg:process_resources[getReplicatedResources:699] typeset PS4_FUNC +epprd_rg:process_resources[getReplicatedResources:700] [[ high == high ]] +epprd_rg:process_resources[getReplicatedResources:700] set -x +epprd_rg:process_resources[getReplicatedResources:702] RV=false +epprd_rg:process_resources[getReplicatedResources:704] clodmget -n -f type HACMPrresmethods +epprd_rg:process_resources[getReplicatedResources:704] [[ -n 9 ]] +epprd_rg:process_resources[getReplicatedResources:707] : Replicated resource methods are defined, check for resources +epprd_rg:process_resources[getReplicatedResources:709] clodmget -q $'name like \'*_REP_RESOURCE\' AND group=epprd_rg' -f value -n HACMPresource +epprd_rg:process_resources[getReplicatedResources:709] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:718] : Verify if any backup profiles are configured and trigger cbm utilities based on that +epprd_rg:process_resources[getReplicatedResources:720] clodmget -q name=BACKUP_ENABLED -f value HACMPresource +epprd_rg:process_resources[getReplicatedResources:720] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:739] echo false +epprd_rg:process_resources[process_volume_groups_main:2304] REPLICATED_RESOURCES=false +epprd_rg:process_resources[process_volume_groups_main:2305] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2306] print -- datavg +epprd_rg:process_resources[process_volume_groups_main:2306] read VOLUME_GROUPS VG_LIST +epprd_rg:process_resources[process_volume_groups_main:2306] IFS=: +epprd_rg:process_resources[process_volume_groups_main:2307] VOLUME_GROUPS=datavg +epprd_rg:process_resources[process_volume_groups_main:2310] : At this point, these variables contain information only for epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2312] export VOLUME_GROUPS +epprd_rg:process_resources[process_volume_groups_main:2313] export RESOURCE_GROUPS +epprd_rg:process_resources[process_volume_groups_main:2315] [[ false == true ]] +epprd_rg:process_resources[process_volume_groups_main:2555] process_volume_groups ACQUIRE +epprd_rg:process_resources[process_volume_groups:2571] PS4_FUNC=process_volume_groups +epprd_rg:process_resources[process_volume_groups:2571] typeset PS4_FUNC +epprd_rg:process_resources[process_volume_groups:2572] [[ high == high ]] +epprd_rg:process_resources[process_volume_groups:2572] set -x +epprd_rg:process_resources[process_volume_groups:2573] STAT=0 +epprd_rg:process_resources[process_volume_groups:2575] GROUPNAME=epprd_rg +epprd_rg:process_resources[process_volume_groups:2575] export GROUPNAME +epprd_rg:process_resources[process_volume_groups:2578] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[process_volume_groups:2581] : Varyon the VGs in the environment +epprd_rg:process_resources[process_volume_groups:2583] cl_activate_vgs -n +epprd_rg:cl_activate_vgs[213] [[ high == high ]] +epprd_rg:cl_activate_vgs[213] version=1.46 +epprd_rg:cl_activate_vgs[215] STATUS=0 +epprd_rg:cl_activate_vgs[215] typeset -li STATUS 
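The reserve-breaking shortcut taken by get_disks above rests on a per-VG ODM test: a volume group is Enhanced Concurrent Mode when its conc_capable attribute in CuAt is y, and if every VG in the list passes, get_disks returns without breaking disk reserves (any reserve found later on an ECM disk is left to cl_pvo and cl_disk_available, per the trace comments). A sketch of that test:

    all_ecm=TRUE
    for vg in $(print $VG_LIST | tr ',' '\n' | sort -u)
    do
        conc=$(clodmget -q "name = $vg and attribute = conc_capable" -f value -n CuAt)
        [[ $conc != y ]] && all_ecm=FALSE   # any non-ECM VG forces reserve handling
    done
    [[ $all_ecm == TRUE ]] && return 0      # all ECM: no reserves to break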
+epprd_rg:cl_activate_vgs[216] SYNCFLAG='' +epprd_rg:cl_activate_vgs[217] CLENV='' +epprd_rg:cl_activate_vgs[218] TMP_FILENAME=/tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs[219] USE_OEM_METHODS=false +epprd_rg:cl_activate_vgs[221] PROC_RES=false +epprd_rg:cl_activate_vgs[225] [[ VGS != 0 ]] +epprd_rg:cl_activate_vgs[225] [[ VGS != GROUP ]] +epprd_rg:cl_activate_vgs[226] PROC_RES=true +epprd_rg:cl_activate_vgs[232] [[ -n == -n ]] +epprd_rg:cl_activate_vgs[234] SYNCFLAG=-n +epprd_rg:cl_activate_vgs[235] shift +epprd_rg:cl_activate_vgs[240] (( 0 != 0 )) +epprd_rg:cl_activate_vgs[247] set -u +epprd_rg:cl_activate_vgs[250] rm -f /tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs[254] lsvg -L -o +epprd_rg:cl_activate_vgs[254] print caavg_private rootvg +epprd_rg:cl_activate_vgs[254] VGSTATUS='caavg_private rootvg' +epprd_rg:cl_activate_vgs[257] ALLVGS=All_volume_groups +epprd_rg:cl_activate_vgs[258] cl_RMupdate resource_acquiring All_volume_groups cl_activate_vgs 2023-01-28T19:50:39.103405 2023-01-28T19:50:39.107687 +epprd_rg:cl_activate_vgs[262] [[ true == false ]] +epprd_rg:cl_activate_vgs[285] LIST_OF_VOLUME_GROUPS_FOR_RG='' +epprd_rg:cl_activate_vgs[289] export GROUPNAME +epprd_rg:cl_activate_vgs[291] echo datavg +epprd_rg:cl_activate_vgs[291] read LIST_OF_VOLUME_GROUPS_FOR_RG VOLUME_GROUPS +epprd_rg:cl_activate_vgs[291] IFS=: +epprd_rg:cl_activate_vgs[294] echo datavg +epprd_rg:cl_activate_vgs[295] tr , '\n' +epprd_rg:cl_activate_vgs[296] sort -u +epprd_rg:cl_activate_vgs[294] LIST_OF_VOLUME_GROUPS_FOR_RG=datavg +epprd_rg:cl_activate_vgs[298] vgs_list datavg +epprd_rg:cl_activate_vgs[vgs_list:178] PS4_LOOP='' +epprd_rg:cl_activate_vgs[vgs_list:178] typeset PS4_LOOP +epprd_rg:cl_activate_vgs:datavg[vgs_list:182] PS4_LOOP=datavg +epprd_rg:cl_activate_vgs:datavg[vgs_list:186] [[ 'caavg_private rootvg' == @(?(*\ )datavg?(\ *)) ]] +epprd_rg:cl_activate_vgs:datavg[vgs_list:192] : call varyon for the volume group in Foreground +epprd_rg:cl_activate_vgs:datavg[vgs_list:194] vgs_chk datavg -n cl_activate_vgs +epprd_rg:cl_activate_vgs(0.055):datavg[vgs_chk:78] VG=datavg +epprd_rg:cl_activate_vgs(0.055):datavg[vgs_chk:78] typeset VG +epprd_rg:cl_activate_vgs(0.055):datavg[vgs_chk:79] SYNCFLAG=-n +epprd_rg:cl_activate_vgs(0.055):datavg[vgs_chk:79] typeset SYNCFLAG +epprd_rg:cl_activate_vgs(0.055):datavg[vgs_chk:80] PROGNAME=cl_activate_vgs +epprd_rg:cl_activate_vgs(0.055):datavg[vgs_chk:80] typeset PROGNAME +epprd_rg:cl_activate_vgs(0.055):datavg[vgs_chk:81] STATUS=0 +epprd_rg:cl_activate_vgs(0.055):datavg[vgs_chk:81] typeset -li STATUS +epprd_rg:cl_activate_vgs(0.055):datavg[vgs_chk:83] [[ -n '' ]] +epprd_rg:cl_activate_vgs(0.055):datavg[vgs_chk:100] amlog_trace '' 'Activating Volume Group|datavg' +epprd_rg:cl_activate_vgs(0.055):datavg[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_vgs(0.056):datavg[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_vgs(0.084):datavg[amlog_trace:319] cltime +epprd_rg:cl_activate_vgs(0.087):datavg[amlog_trace:319] DATE=2023-01-28T19:50:39.151198 +epprd_rg:cl_activate_vgs(0.087):datavg[amlog_trace:320] echo '|2023-01-28T19:50:39.151198|INFO: Activating Volume Group|datavg' +epprd_rg:cl_activate_vgs(0.087):datavg[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_vgs(0.087):datavg[vgs_chk:102] typeset -x ERRMSG +epprd_rg:cl_activate_vgs(0.087):datavg[vgs_chk:103] clvaryonvg -n datavg +epprd_rg:clvaryonvg(0.009):datavg[985] version=1.21.7.22 +epprd_rg:clvaryonvg(0.009):datavg[989] : Without this test, cause of 
failure due to non-root may not be obvious +epprd_rg:clvaryonvg(0.009):datavg[991] [[ -z '' ]] +epprd_rg:clvaryonvg(0.009):datavg[991] id -nu +epprd_rg:clvaryonvg(0.010):datavg[991] 2> /dev/null +epprd_rg:clvaryonvg(0.012):datavg[991] user_name=root +epprd_rg:clvaryonvg(0.012):datavg[994] : Check if RBAC is enabled +epprd_rg:clvaryonvg(0.012):datavg[996] is_rbac_enabled='' +epprd_rg:clvaryonvg(0.012):datavg[996] typeset is_rbac_enabled +epprd_rg:clvaryonvg(0.012):datavg[997] clodmget -nq group='LDAPClient and name=RBACConfig' -f value HACMPLDAP +epprd_rg:clvaryonvg(0.013):datavg[997] 2> /dev/null +epprd_rg:clvaryonvg(0.016):datavg[997] is_rbac_enabled='' +epprd_rg:clvaryonvg(0.016):datavg[999] role='' +epprd_rg:clvaryonvg(0.016):datavg[999] typeset role +epprd_rg:clvaryonvg(0.016):datavg[1000] [[ root != root ]] +epprd_rg:clvaryonvg(0.016):datavg[1009] LEAVEOFF=FALSE +epprd_rg:clvaryonvg(0.016):datavg[1010] FORCEON='' +epprd_rg:clvaryonvg(0.016):datavg[1011] FORCEUPD=FALSE +epprd_rg:clvaryonvg(0.016):datavg[1012] NOQUORUM=20 +epprd_rg:clvaryonvg(0.016):datavg[1013] MISSING_UPDATES=30 +epprd_rg:clvaryonvg(0.016):datavg[1014] DATA_DIVERGENCE=31 +epprd_rg:clvaryonvg(0.016):datavg[1015] ARGS='' +epprd_rg:clvaryonvg(0.016):datavg[1016] typeset -li varyonvg_rc +epprd_rg:clvaryonvg(0.016):datavg[1017] typeset -li MAXLVS +epprd_rg:clvaryonvg(0.016):datavg[1018] ENODEV=19 +epprd_rg:clvaryonvg(0.016):datavg[1018] typeset -li ENODEV +epprd_rg:clvaryonvg(0.016):datavg[1020] set -u +epprd_rg:clvaryonvg(0.016):datavg[1022] /bin/dspmsg -s 2 cspoc.cat 31 'usage: clvaryonvg [-F] [-f] [-n] [-p] [-s] [-o] \n' +epprd_rg:clvaryonvg(0.019):datavg[1022] USAGE='usage: clvaryonvg [-F] [-f] [-n] [-p] [-s] [-o] ' +epprd_rg:clvaryonvg(0.019):datavg[1023] (( 2 < 1 )) +epprd_rg:clvaryonvg(0.019):datavg[1029] : Parse the options +epprd_rg:clvaryonvg(0.019):datavg[1031] S_FLAG='' +epprd_rg:clvaryonvg(0.019):datavg[1032] P_FLAG='' +epprd_rg:clvaryonvg(0.019):datavg[1033] getopts :Ffnops option +epprd_rg:clvaryonvg(0.019):datavg[1038] : -n Always applied, retained for compatibility +epprd_rg:clvaryonvg(0.019):datavg[1033] getopts :Ffnops option +epprd_rg:clvaryonvg(0.019):datavg[1048] : Pick up the volume group name, which follows the options +epprd_rg:clvaryonvg(0.019):datavg[1050] shift 1 +epprd_rg:clvaryonvg(0.019):datavg[1051] VG=datavg +epprd_rg:clvaryonvg(0.019):datavg[1054] : Set up filenames we will be using +epprd_rg:clvaryonvg(0.019):datavg[1056] VGDIR=/usr/es/sbin/cluster/etc/vg/ +epprd_rg:clvaryonvg(0.019):datavg[1057] TSFILE=/usr/es/sbin/cluster/etc/vg/datavg.tstamp +epprd_rg:clvaryonvg(0.019):datavg[1058] DSFILE=/usr/es/sbin/cluster/etc/vg/datavg.desc +epprd_rg:clvaryonvg(0.019):datavg[1059] RPFILE=/usr/es/sbin/cluster/etc/vg/datavg.replay +epprd_rg:clvaryonvg(0.019):datavg[1060] permset=/usr/es/sbin/cluster/etc/vg/datavg.perms +epprd_rg:clvaryonvg(0.019):datavg[1061] failfile=/usr/es/sbin/cluster/etc/vg/datavg.fail +epprd_rg:clvaryonvg(0.019):datavg[1065] : Get some LVM information we are going to need in processing this +epprd_rg:clvaryonvg(0.019):datavg[1066] : volume group: +epprd_rg:clvaryonvg(0.019):datavg[1067] : - volume group identifier - vgid +epprd_rg:clvaryonvg(0.019):datavg[1068] : - list of disks +epprd_rg:clvaryonvg(0.019):datavg[1069] : - quorum indicator +epprd_rg:clvaryonvg(0.019):datavg[1070] : - timestamp if present +epprd_rg:clvaryonvg(0.019):datavg[1072] /usr/sbin/getlvodm -v datavg +epprd_rg:clvaryonvg(0.022):datavg[1072] VGID=00c44af100004b00000001851e9dc053 
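clvaryonvg keeps its per-VG bookkeeping in a fixed set of files under /usr/es/sbin/cluster/etc/vg/, named for the volume group. Only the .tstamp file's role is confirmed later in this trace (a timestamp HACMP maintains for older volume groups, checked in vgdatimestamps); the purposes noted for the others are inferred from their names:

    VGDIR=/usr/es/sbin/cluster/etc/vg/
    TSFILE=${VGDIR}${VG}.tstamp    # HA-maintained VGDA timestamp copy
    DSFILE=${VGDIR}${VG}.desc      # saved VG description (inferred)
    RPFILE=${VGDIR}${VG}.replay    # replay data (inferred)
    permset=${VGDIR}${VG}.perms    # saved LV permissions (inferred)
    failfile=${VGDIR}${VG}.fail    # failure marker (inferred)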
+epprd_rg:clvaryonvg(0.024):datavg[1073] cut '-d ' -f2 +epprd_rg:clvaryonvg(0.024):datavg[1073] /usr/sbin/getlvodm -w 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(0.027):datavg[1073] pvlst=$'hdisk2\nhdisk3\nhdisk4\nhdisk5\nhdisk6\nhdisk7\nhdisk8' +epprd_rg:clvaryonvg(0.027):datavg[1074] /usr/sbin/getlvodm -Q datavg +epprd_rg:clvaryonvg(0.031):datavg[1074] quorum=y +epprd_rg:clvaryonvg(0.031):datavg[1075] TS_FROM_DISK='' +epprd_rg:clvaryonvg(0.031):datavg[1076] TS_FROM_ODM='' +epprd_rg:clvaryonvg(0.031):datavg[1077] GOOD_PV='' +epprd_rg:clvaryonvg(0.031):datavg[1078] O_flag='' +epprd_rg:clvaryonvg(0.031):datavg[1079] A_flag='' +epprd_rg:clvaryonvg(0.031):datavg[1080] mode_flag='' +epprd_rg:clvaryonvg(0.031):datavg[1081] vg_on_mode='' +epprd_rg:clvaryonvg(0.031):datavg[1082] vg_set_passive=FALSE +epprd_rg:clvaryonvg(0.031):datavg[1084] odmget -q 'attribute = varyon_state' PdAt +epprd_rg:clvaryonvg(0.034):datavg[1084] [[ -n $'\nPdAt:\n\tuniquetype = "logical_volume/vgsubclass/vgtype"\n\tattribute = "varyon_state"\n\tdeflt = "0"\n\tvalues = "0,1,2,3"\n\twidth = ""\n\ttype = "R"\n\tgeneric = ""\n\trep = "l"\n\tnls_index = 0' ]] +epprd_rg:clvaryonvg(0.034):datavg[1087] : LVM may record that a volume group was varied on from an earlier +epprd_rg:clvaryonvg(0.034):datavg[1088] : IPL. Rely on HA state tracking, and override the LVM check +epprd_rg:clvaryonvg(0.034):datavg[1090] O_flag=-O +epprd_rg:clvaryonvg(0.034):datavg[1093] : Checking if SCSI PR is enabled and it is so, +epprd_rg:clvaryonvg(0.034):datavg[1094] : confirming if the SCSI PR reservations are intact. +epprd_rg:clvaryonvg(0.035):datavg[1096] lssrc -ls clstrmgrES +epprd_rg:clvaryonvg(0.035):datavg[1096] 2>& 1 +epprd_rg:clvaryonvg(0.035):datavg[1096] egrep -q -v 'ST_INIT|NOT_CONFIGURED' +epprd_rg:clvaryonvg(0.035):datavg[1096] grep 'Current state:' +epprd_rg:clvaryonvg(0.051):datavg[1098] clodmget -n -q policy=scsi -f value HACMPsplitmerge +epprd_rg:clvaryonvg(0.054):datavg[1098] SCSIPR_ENABLED='' +epprd_rg:clvaryonvg(0.054):datavg[1098] typeset SCSIPR_ENABLED +epprd_rg:clvaryonvg(0.054):datavg[1099] clodmget -q $'name like \'*VOLUME_GROUP\' and value = datavg' -f group -n HACMPresource +epprd_rg:clvaryonvg(0.058):datavg[1099] resgrp=epprd_rg +epprd_rg:clvaryonvg(0.058):datavg[1099] typeset resgrp +epprd_rg:clvaryonvg(0.058):datavg[1100] [[ '' == Yes ]] +epprd_rg:clvaryonvg(0.058):datavg[1134] : Operations such as varying on the volume group are likely to +epprd_rg:clvaryonvg(0.058):datavg[1135] : require read/write access. So, set any volume group fencing appropriately. +epprd_rg:clvaryonvg(0.058):datavg[1137] cl_set_vg_fence_height -c datavg rw +epprd_rg:clvaryonvg(0.061):datavg[1138] RC=0 +epprd_rg:clvaryonvg(0.061):datavg[1139] (( 19 == 0 )) +epprd_rg:clvaryonvg(0.061):datavg[1147] : Return code from volume group fencing for datavg is 0 +epprd_rg:clvaryonvg(0.061):datavg[1148] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.061):datavg[1160] : Check on the current state of the volume group +epprd_rg:clvaryonvg(0.063):datavg[1182] grep -x -q datavg +epprd_rg:clvaryonvg(0.063):datavg[1182] lsvg -L +epprd_rg:clvaryonvg(0.066):datavg[1184] : The volume group is known - check to see if its already varyd on. 
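Before any varyon attempt, fencing for the VG is raised to read/write with cl_set_vg_fence_height, and the return code is tested against ENODEV (19) before the generic nonzero check. Reading the two tests above, ENODEV appears to be the tolerated "no fence group configured" case; a sketch under that assumption:

    ENODEV=19
    cl_set_vg_fence_height -c $VG rw
    RC=$?
    (( RC == ENODEV )) && RC=0   # assumption: no fence group exists, nothing to change
    if (( RC != 0 ))
    then
        # varyon needs read/write access; a failed fence change is fatal here
        return $RC
    fi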
+epprd_rg:clvaryonvg(0.067):datavg[1186] grep -x -q datavg +epprd_rg:clvaryonvg(0.067):datavg[1186] lsvg -L -o +epprd_rg:clvaryonvg(0.071):datavg[1190] lsvg -L datavg +epprd_rg:clvaryonvg(0.071):datavg[1190] 2> /dev/null +epprd_rg:clvaryonvg(0.071):datavg[1190] grep -q -i -w passive-only +epprd_rg:clvaryonvg(0.117):datavg[1191] vg_on_mode=passive +epprd_rg:clvaryonvg(0.119):datavg[1194] grep -iw removed +epprd_rg:clvaryonvg(0.119):datavg[1194] lsvg -p datavg +epprd_rg:clvaryonvg(0.119):datavg[1194] 2> /dev/null +epprd_rg:clvaryonvg(0.140):datavg[1194] removed_disks='' +epprd_rg:clvaryonvg(0.140):datavg[1195] [[ -n '' ]] +epprd_rg:clvaryonvg(0.140):datavg[1213] [[ -n passive ]] +epprd_rg:clvaryonvg(0.140):datavg[1215] lqueryvg -g 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(0.141):datavg[1215] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.160):datavg[1321] : +epprd_rg:clvaryonvg(0.160):datavg[1322] : First, sniff at the disk to see if the local ODM information +epprd_rg:clvaryonvg(0.160):datavg[1323] : matches what is on the disk. +epprd_rg:clvaryonvg(0.160):datavg[1324] : +epprd_rg:clvaryonvg(0.160):datavg[1326] vgdatimestamps +epprd_rg:clvaryonvg(0.160):datavg[vgdatimestamps:201] PS4_FUNC=vgdatimestamps +epprd_rg:clvaryonvg(0.160):datavg[vgdatimestamps:201] typeset PS4_FUNC +epprd_rg:clvaryonvg(0.160):datavg[vgdatimestamps:202] [[ high == high ]] +epprd_rg:clvaryonvg(0.160):datavg[vgdatimestamps:202] set -x +epprd_rg:clvaryonvg(0.160):datavg[vgdatimestamps:203] set -u +epprd_rg:clvaryonvg(0.160):datavg[vgdatimestamps:206] : See what timestamp LVM has recorded from the last time it checked +epprd_rg:clvaryonvg(0.160):datavg[vgdatimestamps:207] : the disks +epprd_rg:clvaryonvg(0.160):datavg[vgdatimestamps:209] /usr/sbin/getlvodm -T 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(0.161):datavg[vgdatimestamps:209] 2> /dev/null +epprd_rg:clvaryonvg(0.164):datavg[vgdatimestamps:209] TS_FROM_ODM=63d4fdf51cba9f18 +epprd_rg:clvaryonvg(0.164):datavg[vgdatimestamps:212] : Check to see if HACMP is maintaining a timestamp for this volume group +epprd_rg:clvaryonvg(0.164):datavg[vgdatimestamps:213] : Needed for some older volume groups +epprd_rg:clvaryonvg(0.164):datavg[vgdatimestamps:215] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.tstamp ]] +epprd_rg:clvaryonvg(0.164):datavg[vgdatimestamps:234] : Get the time stamp from the actual disk +epprd_rg:clvaryonvg(0.164):datavg[vgdatimestamps:236] clvgdats /dev/datavg +epprd_rg:clvaryonvg(0.165):datavg[vgdatimestamps:236] 2> /dev/null +epprd_rg:clvaryonvg(0.174):datavg[vgdatimestamps:236] TS_FROM_DISK=63d4fdf51cba9f18 +epprd_rg:clvaryonvg(0.174):datavg[vgdatimestamps:237] clvgdats_rc=0 +epprd_rg:clvaryonvg(0.174):datavg[vgdatimestamps:238] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.174):datavg[vgdatimestamps:247] [[ -z 63d4fdf51cba9f18 ]] +epprd_rg:clvaryonvg(0.174):datavg[1328] [[ 63d4fdf51cba9f18 != 63d4fdf51cba9f18 ]] +epprd_rg:clvaryonvg(0.174):datavg[1344] : There is a chance that a VG that should be in passive mode is not. +epprd_rg:clvaryonvg(0.174):datavg[1345] : Run cl_pvo to put it in passive mode if possible. +epprd_rg:clvaryonvg(0.174):datavg[1350] [[ -z passive ]] +epprd_rg:clvaryonvg(0.174):datavg[1350] [[ passive == ordinary ]] +epprd_rg:clvaryonvg(0.174):datavg[1350] [[ passive == passive ]] +epprd_rg:clvaryonvg(0.174):datavg[1350] [[ -n '' ]] +epprd_rg:clvaryonvg(0.174):datavg[1381] : Let us assume that the old style synclvodm would sync all the PV/FS changes. 
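The "sniff at the disk" step compares two copies of the VGDA timestamp: the one LVM recorded in the ODM the last time it looked, and the one actually on disk. Equal values (63d4fdf51cba9f18 on both sides here) mean no other node has changed the VG behind this node's back; a mismatch would force a resync of the local definitions before proceeding. In outline:

    TS_FROM_ODM=$(/usr/sbin/getlvodm -T $VGID 2>/dev/null)   # LVM's cached timestamp
    TS_FROM_DISK=$(clvgdats /dev/$VG 2>/dev/null)            # timestamp in the VGDA itself
    if [[ $TS_FROM_ODM != $TS_FROM_DISK ]]
    then
        : # another node updated the VG; refresh the local ODM copy first
    fi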
+epprd_rg:clvaryonvg(0.174):datavg[1383] expimpvg_notrequired=1 +epprd_rg:clvaryonvg(0.174):datavg[1386] : Optimistically give varyonvg a try. +epprd_rg:clvaryonvg(0.175):datavg[1388] [[ passive == passive ]] +epprd_rg:clvaryonvg(0.175):datavg[1391] : If the volume group was varyd on in passive mode when this node came +epprd_rg:clvaryonvg(0.175):datavg[1392] : up, flip it over to active mode. Following logic will then fall +epprd_rg:clvaryonvg(0.175):datavg[1393] : through to updatefs. +epprd_rg:clvaryonvg(0.175):datavg[1395] [[ passive == passive ]] +epprd_rg:clvaryonvg(0.175):datavg[1395] A_flag=-A +epprd_rg:clvaryonvg(0.175):datavg[1396] varyonvg -n -c -A -O datavg +epprd_rg:clvaryonvg(0.176):datavg[1396] 2>& 1 +epprd_rg:clvaryonvg(0.767):datavg[1396] varyonvg_output='' +epprd_rg:clvaryonvg(0.767):datavg[1397] varyonvg_rc=0 +epprd_rg:clvaryonvg(0.767):datavg[1397] typeset -li varyonvg_rc +epprd_rg:clvaryonvg(0.767):datavg[1399] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.767):datavg[1481] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.767):datavg[1576] : At this point, datavg should be varied on +epprd_rg:clvaryonvg(0.767):datavg[1578] [[ FALSE == TRUE ]] +epprd_rg:clvaryonvg(0.767):datavg[1585] [[ -z 63d4fdf51cba9f18 ]] +epprd_rg:clvaryonvg(0.767):datavg[1592] vgdatimestamps +epprd_rg:clvaryonvg(0.767):datavg[vgdatimestamps:201] PS4_FUNC=vgdatimestamps +epprd_rg:clvaryonvg(0.767):datavg[vgdatimestamps:201] typeset PS4_FUNC +epprd_rg:clvaryonvg(0.767):datavg[vgdatimestamps:202] [[ high == high ]] +epprd_rg:clvaryonvg(0.767):datavg[vgdatimestamps:202] set -x +epprd_rg:clvaryonvg(0.767):datavg[vgdatimestamps:203] set -u +epprd_rg:clvaryonvg(0.767):datavg[vgdatimestamps:206] : See what timestamp LVM has recorded from the last time it checked +epprd_rg:clvaryonvg(0.767):datavg[vgdatimestamps:207] : the disks +epprd_rg:clvaryonvg(0.767):datavg[vgdatimestamps:209] /usr/sbin/getlvodm -T 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(0.768):datavg[vgdatimestamps:209] 2> /dev/null +epprd_rg:clvaryonvg(0.771):datavg[vgdatimestamps:209] TS_FROM_ODM=63d4fdff1617c6e2 +epprd_rg:clvaryonvg(0.771):datavg[vgdatimestamps:212] : Check to see if HACMP is maintaining a timestamp for this volume group +epprd_rg:clvaryonvg(0.771):datavg[vgdatimestamps:213] : Needed for some older volume groups +epprd_rg:clvaryonvg(0.771):datavg[vgdatimestamps:215] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.tstamp ]] +epprd_rg:clvaryonvg(0.771):datavg[vgdatimestamps:234] : Get the time stamp from the actual disk +epprd_rg:clvaryonvg(0.771):datavg[vgdatimestamps:236] clvgdats /dev/datavg +epprd_rg:clvaryonvg(0.772):datavg[vgdatimestamps:236] 2> /dev/null +epprd_rg:clvaryonvg(0.782):datavg[vgdatimestamps:236] TS_FROM_DISK=63d4fdff1617c6e2 +epprd_rg:clvaryonvg(0.782):datavg[vgdatimestamps:237] clvgdats_rc=0 +epprd_rg:clvaryonvg(0.782):datavg[vgdatimestamps:238] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.782):datavg[vgdatimestamps:247] [[ -z 63d4fdff1617c6e2 ]] +epprd_rg:clvaryonvg(0.782):datavg[1600] [[ 63d4fdff1617c6e2 != 63d4fdff1617c6e2 ]] +epprd_rg:clvaryonvg(0.782):datavg[1622] [[ FALSE == TRUE ]] +epprd_rg:clvaryonvg(0.782):datavg[1633] : Even if everything looks OK, update the local file system +epprd_rg:clvaryonvg(0.782):datavg[1634] : definitions, since changes there do not show up in the +epprd_rg:clvaryonvg(0.782):datavg[1635] : VGDA timestamps +epprd_rg:clvaryonvg(0.782):datavg[1637] updatefs datavg +epprd_rg:clvaryonvg(0.782):datavg[updatefs:506] PS4_FUNC=updatefs +epprd_rg:clvaryonvg(0.782):datavg[updatefs:506] typeset PS4_FUNC 
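Because datavg was already varied on in passive mode when this node came up, the acquire path flips it straight to active mode rather than doing a fresh varyon. The flag meanings below are taken from the surrounding trace: -A is set only because vg_on_mode is passive, and -O overrides the varyon state LVM may have recorded from an earlier IPL; -n suppresses synchronization of stale partitions and -c requests a concurrent-capable varyon.

    varyonvg -n -c -A -O $VG     # passive-to-active flip, concurrent, override varyon_state
    varyonvg_rc=$?
    (( varyonvg_rc != 0 )) && :  # error recovery path, not reached in this trace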
+epprd_rg:clvaryonvg(0.782):datavg[updatefs:507] [[ high == high ]] +epprd_rg:clvaryonvg(0.782):datavg[updatefs:507] set -x +epprd_rg:clvaryonvg(0.782):datavg[updatefs:508] do_imfs='' +epprd_rg:clvaryonvg(0.782):datavg[updatefs:508] typeset do_imfs +epprd_rg:clvaryonvg(0.782):datavg[updatefs:509] has_typed_lvs='' +epprd_rg:clvaryonvg(0.782):datavg[updatefs:509] typeset has_typed_lvs +epprd_rg:clvaryonvg(0.782):datavg[updatefs:512] : Delete existing filesystem information for this volume group. This is +epprd_rg:clvaryonvg(0.782):datavg[updatefs:513] : needed because imfs will not update an existing /etc/filesystems entry. +epprd_rg:clvaryonvg(0.784):datavg[updatefs:515] cut -f1 '-d ' +epprd_rg:clvaryonvg(0.784):datavg[updatefs:515] /usr/sbin/getlvodm -L datavg +epprd_rg:clvaryonvg(0.788):datavg[updatefs:515] lv_list=$'saplv\nsapmntlv\noraclelv\nepplv\noraarchlv\nsapdata1lv\nsapdata2lv\nsapdata3lv\nsapdata4lv\nboardlv\noriglogAlv\noriglogBlv\nmirrlogAlv\nmirrlogBlv\nepprdaloglv' +epprd_rg:clvaryonvg(0.788):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.788):datavg[updatefs:521] clodmget -q 'name = saplv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.791):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.791):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.791):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.791):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(0.791):datavg[updatefs:530] /usr/sbin/getlvcb -f saplv +epprd_rg:clvaryonvg(0.792):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.811):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.811):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.811):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.812):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.812):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.816):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.816):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.816):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.816):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.817):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.837):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.837):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.837):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.837):datavg[updatefs:538] : 3. 
Its logs LVCB is readable +epprd_rg:clvaryonvg(0.838):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.838):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.841):datavg[updatefs:545] /usr/sbin/imfs -lx saplv +epprd_rg:clvaryonvg(0.846):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.846):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.846):datavg[updatefs:521] clodmget -q 'name = sapmntlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.849):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.849):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.849):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.849):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(0.849):datavg[updatefs:530] /usr/sbin/getlvcb -f sapmntlv +epprd_rg:clvaryonvg(0.850):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.869):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.869):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.869):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.870):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.870):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.874):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.874):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.874):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.874):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.875):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.894):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.894):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.894):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.894):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.895):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.895):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.898):datavg[updatefs:545] /usr/sbin/imfs -lx sapmntlv +epprd_rg:clvaryonvg(0.903):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.903):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.903):datavg[updatefs:521] clodmget -q 'name = oraclelv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.906):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.906):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.906):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.906):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.906):datavg[updatefs:530] /usr/sbin/getlvcb -f oraclelv +epprd_rg:clvaryonvg(0.907):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.926):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.926):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.926):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.927):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.928):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.931):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.931):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.931):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.931):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.933):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.952):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.952):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.952):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.952):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.953):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.953):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.956):datavg[updatefs:545] /usr/sbin/imfs -lx oraclelv +epprd_rg:clvaryonvg(0.960):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.960):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.960):datavg[updatefs:521] clodmget -q 'name = epplv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.964):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.964):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.964):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.964):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.964):datavg[updatefs:530] /usr/sbin/getlvcb -f epplv +epprd_rg:clvaryonvg(0.965):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.983):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.983):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.983):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.985):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.984):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.989):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.989):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.989):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.989):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.991):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.009):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.009):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.009):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.009):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.010):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.011):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.014):datavg[updatefs:545] /usr/sbin/imfs -lx epplv +epprd_rg:clvaryonvg(1.018):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.018):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.018):datavg[updatefs:521] clodmget -q 'name = oraarchlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.021):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.021):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.021):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.021):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(1.021):datavg[updatefs:530] /usr/sbin/getlvcb -f oraarchlv +epprd_rg:clvaryonvg(1.022):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.039):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.040):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.040):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.041):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.043):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.046):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.046):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.046):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.046):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.047):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.065):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.065):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.065):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.065):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.066):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.067):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.070):datavg[updatefs:545] /usr/sbin/imfs -lx oraarchlv +epprd_rg:clvaryonvg(1.074):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.074):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.074):datavg[updatefs:521] clodmget -q 'name = sapdata1lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.078):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.078):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.078):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.078):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(1.078):datavg[updatefs:530] /usr/sbin/getlvcb -f sapdata1lv +epprd_rg:clvaryonvg(1.079):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.095):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.095):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.095):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.097):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.098):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.101):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.101):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.101):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.101):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.103):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.121):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.121):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.121):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.121):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.122):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.123):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.126):datavg[updatefs:545] /usr/sbin/imfs -lx sapdata1lv +epprd_rg:clvaryonvg(1.130):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.130):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.130):datavg[updatefs:521] clodmget -q 'name = sapdata2lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.133):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.133):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.134):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.134):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(1.134):datavg[updatefs:530] /usr/sbin/getlvcb -f sapdata2lv +epprd_rg:clvaryonvg(1.134):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.153):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.153):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.153):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.154):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.155):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.159):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.159):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.159):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.159):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.160):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.178):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.178):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.178):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.178):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.179):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.180):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.183):datavg[updatefs:545] /usr/sbin/imfs -lx sapdata2lv +epprd_rg:clvaryonvg(1.187):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.187):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.187):datavg[updatefs:521] clodmget -q 'name = sapdata3lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.190):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.190):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.190):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.190):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(1.191):datavg[updatefs:530] /usr/sbin/getlvcb -f sapdata3lv +epprd_rg:clvaryonvg(1.191):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.208):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.208):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.208):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.210):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.211):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.214):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.214):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.214):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.214):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.215):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.233):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.233):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.233):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.233):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.234):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.235):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.238):datavg[updatefs:545] /usr/sbin/imfs -lx sapdata3lv +epprd_rg:clvaryonvg(1.242):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.242):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.242):datavg[updatefs:521] clodmget -q 'name = sapdata4lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.245):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.245):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.245):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.245):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(1.246):datavg[updatefs:530] /usr/sbin/getlvcb -f sapdata4lv +epprd_rg:clvaryonvg(1.246):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.263):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.263):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.263):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.264):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.265):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.269):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.269):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.269):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.269):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.270):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.288):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.288):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.288):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.288):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.289):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.290):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.293):datavg[updatefs:545] /usr/sbin/imfs -lx sapdata4lv +epprd_rg:clvaryonvg(1.297):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.297):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.297):datavg[updatefs:521] clodmget -q 'name = boardlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.300):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.300):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.300):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.300):datavg[updatefs:528] : information to reconstruct it. 
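Each pass that survives those checks ends the way the sapdata4lv pass above did: the stanza is removed with imfs and a flag defers the rebuild (updatefs:545-546). Roughly, and only once the LVCB and its log's LVCB have been proved readable:

    # Safe to drop: the LVCB holds enough to reconstruct the stanza.
    # -l names the logical volume, -x removes its /etc/filesystems entry.
    /usr/sbin/imfs -lx sapdata4lv
    do_imfs=true    # one imfs rebuild pass is now owed for the whole VG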
+epprd_rg:clvaryonvg(1.300):datavg[updatefs:530] /usr/sbin/getlvcb -f boardlv +epprd_rg:clvaryonvg(1.301):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.318):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.318):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.318):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.319):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.320):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.324):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.324):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.324):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.324):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.325):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.343):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.343):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.343):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.343):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.344):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.345):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.348):datavg[updatefs:545] /usr/sbin/imfs -lx boardlv +epprd_rg:clvaryonvg(1.352):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.352):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.352):datavg[updatefs:521] clodmget -q 'name = origlogAlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.355):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.355):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.355):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.355):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(1.355):datavg[updatefs:530] /usr/sbin/getlvcb -f origlogAlv +epprd_rg:clvaryonvg(1.356):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.373):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.373):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.373):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.375):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.375):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.379):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.379):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.379):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.379):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.381):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.398):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.398):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.398):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.398):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.400):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.401):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.404):datavg[updatefs:545] /usr/sbin/imfs -lx origlogAlv +epprd_rg:clvaryonvg(1.407):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.407):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.407):datavg[updatefs:521] clodmget -q 'name = origlogBlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.411):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.411):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.411):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.411):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(1.411):datavg[updatefs:530] /usr/sbin/getlvcb -f origlogBlv +epprd_rg:clvaryonvg(1.412):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.428):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.428):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.428):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.430):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.431):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.434):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.434):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.435):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.435):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.436):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.453):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.453):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.453):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.453):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.454):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.456):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.458):datavg[updatefs:545] /usr/sbin/imfs -lx origlogBlv +epprd_rg:clvaryonvg(1.462):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.462):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.463):datavg[updatefs:521] clodmget -q 'name = mirrlogAlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.466):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.466):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.466):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.466):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(1.466):datavg[updatefs:530] /usr/sbin/getlvcb -f mirrlogAlv +epprd_rg:clvaryonvg(1.467):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.487):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.487):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.487):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.489):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.490):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.493):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.493):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.493):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.493):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.494):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.512):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.512):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.512):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.512):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.513):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.514):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.517):datavg[updatefs:545] /usr/sbin/imfs -lx mirrlogAlv +epprd_rg:clvaryonvg(1.521):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.521):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.521):datavg[updatefs:521] clodmget -q 'name = mirrlogBlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.524):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.524):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.524):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.524):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(1.524):datavg[updatefs:530] /usr/sbin/getlvcb -f mirrlogBlv +epprd_rg:clvaryonvg(1.525):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.542):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.542):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.542):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.544):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.545):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.548):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.549):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.549):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.549):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.550):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.567):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.568):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.568):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.568):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.569):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.570):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.573):datavg[updatefs:545] /usr/sbin/imfs -lx mirrlogBlv +epprd_rg:clvaryonvg(1.576):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.577):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.577):datavg[updatefs:521] clodmget -q 'name = epprdaloglv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.580):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.580):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.580):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.580):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(1.580):datavg[updatefs:530] /usr/sbin/getlvcb -f epprdaloglv +epprd_rg:clvaryonvg(1.581):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.597):datavg[updatefs:530] fs_info=' ' +epprd_rg:clvaryonvg(1.597):datavg[updatefs:531] [[ -n ' ' ]] +epprd_rg:clvaryonvg(1.597):datavg[updatefs:531] [[ ' ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.597):datavg[updatefs:552] [[ -n true ]] +epprd_rg:clvaryonvg(1.597):datavg[updatefs:556] : Pick up any file system changes that may have happened when +epprd_rg:clvaryonvg(1.597):datavg[updatefs:557] : the volume group was owned by another node. That is, if a +epprd_rg:clvaryonvg(1.597):datavg[updatefs:558] : local change was made - not through C-SPOC, we whould have no +epprd_rg:clvaryonvg(1.597):datavg[updatefs:559] : indication it happened. 
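The [[ -n true ]] test above is the do_imfs check; since stanzas were deleted, a single imfs call against the whole volume group follows (updatefs:563, first statements of the next trace line; it accounts for the jump from 1.597s to 2.278s in the timestamps). It regenerates every stanza from the LVCBs on disk, which is exactly how a change made on the peer node outside C-SPOC becomes visible here:

    # Rebuild /etc/filesystems for all LVs in datavg from their LVCBs
    /usr/sbin/imfs datavg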
+epprd_rg:clvaryonvg(1.597):datavg[updatefs:561] [[ -z '' ]] +epprd_rg:clvaryonvg(1.597):datavg[updatefs:563] /usr/sbin/imfs datavg +epprd_rg:clvaryonvg(2.278):datavg[updatefs:589] : For a valid file system configuration, the mount point in +epprd_rg:clvaryonvg(2.278):datavg[updatefs:590] : /etc/filesystems for the logical volume should match the +epprd_rg:clvaryonvg(2.278):datavg[updatefs:591] : label of the logical volume. The above imfs should have +epprd_rg:clvaryonvg(2.278):datavg[updatefs:592] : matched those two. Now, check that they match the label +epprd_rg:clvaryonvg(2.278):datavg[updatefs:593] : for the logical volume as saved in ODM. +epprd_rg:clvaryonvg(2.278):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.278):datavg[updatefs:600] clodmget -q 'name = saplv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.282):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.282):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.282):datavg[updatefs:607] /usr/sbin/getlvcb -f saplv +epprd_rg:clvaryonvg(2.300):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.300):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.300):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.300):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.300):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.300):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.300):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.300):datavg[updatefs:623] : Label and file system type from LVCB on disk for saplv +epprd_rg:clvaryonvg(2.301):datavg[updatefs:625] getlvcb -T -A saplv +epprd_rg:clvaryonvg(2.301):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.305):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.308):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.310):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.322):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.322):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.322):datavg[updatefs:632] : Mount point in /etc/filesystems for saplv +epprd_rg:clvaryonvg(2.324):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/saplv$' /etc/filesystems +epprd_rg:clvaryonvg(2.326):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.328):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.331):datavg[updatefs:634] fs_mount_point=/usr/sap +epprd_rg:clvaryonvg(2.331):datavg[updatefs:637] : CuAt label attribute for saplv +epprd_rg:clvaryonvg(2.331):datavg[updatefs:639] clodmget -q 'name = saplv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.335):datavg[updatefs:639] CuAt_label=/usr/sap +epprd_rg:clvaryonvg(2.336):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.337):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.341):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.341):datavg[updatefs:657] [[ -z /usr/sap ]] +epprd_rg:clvaryonvg(2.341):datavg[updatefs:657] [[ /usr/sap == None ]] +epprd_rg:clvaryonvg(2.341):datavg[updatefs:665] [[ /usr/sap == /usr/sap ]] +epprd_rg:clvaryonvg(2.341):datavg[updatefs:665] [[ /usr/sap != /usr/sap ]] 
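The second sweep, which began with saplv above, parses each LV's label and type out of getlvcb -T -A with the pipeline traced at updatefs:625. Since ksh runs the last element of a pipeline in the current shell, the trailing read leaves the variables set afterwards; a self-contained sketch for saplv:

    # Keep only the 'label = ...' and 'type = ...' lines, splice them onto
    # one line (paste -s - -), then let read split that line into fields
    LC_ALL=C getlvcb -T -A saplv |
        egrep -w 'label =|type =' |
        paste -s - - |
        read skip skip lvcb_label skip skip lvcb_type rest
    print -- "$lvcb_label $lvcb_type"    # -> /usr/sap jfs2 in the pass above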
+epprd_rg:clvaryonvg(2.341):datavg[updatefs:685] [[ /usr/sap != /usr/sap ]] +epprd_rg:clvaryonvg(2.341):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.341):datavg[updatefs:600] clodmget -q 'name = sapmntlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.344):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.344):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.344):datavg[updatefs:607] /usr/sbin/getlvcb -f sapmntlv +epprd_rg:clvaryonvg(2.362):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.363):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.363):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.363):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.363):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.363):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.363):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.363):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapmntlv +epprd_rg:clvaryonvg(2.364):datavg[updatefs:625] getlvcb -T -A sapmntlv +epprd_rg:clvaryonvg(2.364):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.367):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.370):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.372):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.385):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.385):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.385):datavg[updatefs:632] : Mount point in /etc/filesystems for sapmntlv +epprd_rg:clvaryonvg(2.387):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapmntlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.389):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.391):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.394):datavg[updatefs:634] fs_mount_point=/sapmnt +epprd_rg:clvaryonvg(2.394):datavg[updatefs:637] : CuAt label attribute for sapmntlv +epprd_rg:clvaryonvg(2.394):datavg[updatefs:639] clodmget -q 'name = sapmntlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.397):datavg[updatefs:639] CuAt_label=/sapmnt +epprd_rg:clvaryonvg(2.399):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.400):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.403):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.403):datavg[updatefs:657] [[ -z /sapmnt ]] +epprd_rg:clvaryonvg(2.403):datavg[updatefs:657] [[ /sapmnt == None ]] +epprd_rg:clvaryonvg(2.403):datavg[updatefs:665] [[ /sapmnt == /sapmnt ]] +epprd_rg:clvaryonvg(2.403):datavg[updatefs:665] [[ /sapmnt != /sapmnt ]] +epprd_rg:clvaryonvg(2.403):datavg[updatefs:685] [[ /sapmnt != /sapmnt ]] +epprd_rg:clvaryonvg(2.403):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.403):datavg[updatefs:600] clodmget -q 'name = oraclelv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.407):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.407):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.407):datavg[updatefs:607] /usr/sbin/getlvcb -f oraclelv 
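The matching mount point comes from /etc/filesystems via AIX's paragraph grep (updatefs:634): -p makes egrep return the whole blank-line-delimited stanza containing the dev line, and the stanza opens with the mount point followed by a colon. A sketch, with oraclelv from the pass that follows:

    lv=oraclelv
    # Grab the stanza whose dev attribute is /dev/$lv, keep its first line,
    # and strip everything from the ':' on - that is the mount point
    fs_mount_point=$(egrep -p "^([[:space:]])*dev([[:space:]])*= /dev/${lv}\$" \
                        /etc/filesystems | head -1 | cut -f1 -d:)
    print -- "$fs_mount_point"    # -> /oracle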
+epprd_rg:clvaryonvg(2.425):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.425):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.425):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.425):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.425):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.425):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.425):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.425):datavg[updatefs:623] : Label and file system type from LVCB on disk for oraclelv +epprd_rg:clvaryonvg(2.426):datavg[updatefs:625] getlvcb -T -A oraclelv +epprd_rg:clvaryonvg(2.426):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.429):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.432):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.434):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.447):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.447):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.447):datavg[updatefs:632] : Mount point in /etc/filesystems for oraclelv +epprd_rg:clvaryonvg(2.449):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/oraclelv$' /etc/filesystems +epprd_rg:clvaryonvg(2.451):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.453):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.456):datavg[updatefs:634] fs_mount_point=/oracle +epprd_rg:clvaryonvg(2.456):datavg[updatefs:637] : CuAt label attribute for oraclelv +epprd_rg:clvaryonvg(2.456):datavg[updatefs:639] clodmget -q 'name = oraclelv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.460):datavg[updatefs:639] CuAt_label=/oracle +epprd_rg:clvaryonvg(2.462):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.463):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.466):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.466):datavg[updatefs:657] [[ -z /oracle ]] +epprd_rg:clvaryonvg(2.466):datavg[updatefs:657] [[ /oracle == None ]] +epprd_rg:clvaryonvg(2.466):datavg[updatefs:665] [[ /oracle == /oracle ]] +epprd_rg:clvaryonvg(2.466):datavg[updatefs:665] [[ /oracle != /oracle ]] +epprd_rg:clvaryonvg(2.466):datavg[updatefs:685] [[ /oracle != /oracle ]] +epprd_rg:clvaryonvg(2.466):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.466):datavg[updatefs:600] clodmget -q 'name = epplv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.469):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.469):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.469):datavg[updatefs:607] /usr/sbin/getlvcb -f epplv +epprd_rg:clvaryonvg(2.488):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.488):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.488):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.488):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.488):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.488):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]]
+epprd_rg:clvaryonvg(2.488):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.488):datavg[updatefs:623] : Label and file system type from LVCB on disk for epplv +epprd_rg:clvaryonvg(2.489):datavg[updatefs:625] getlvcb -T -A epplv +epprd_rg:clvaryonvg(2.489):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.492):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.495):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.497):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.510):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.510):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.510):datavg[updatefs:632] : Mount point in /etc/filesystems for epplv +epprd_rg:clvaryonvg(2.512):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/epplv$' /etc/filesystems +epprd_rg:clvaryonvg(2.515):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.514):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.519):datavg[updatefs:634] fs_mount_point=/oracle/EPP +epprd_rg:clvaryonvg(2.519):datavg[updatefs:637] : CuAt label attribute for epplv +epprd_rg:clvaryonvg(2.519):datavg[updatefs:639] clodmget -q 'name = epplv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.523):datavg[updatefs:639] CuAt_label=/oracle/EPP +epprd_rg:clvaryonvg(2.525):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.524):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.529):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.529):datavg[updatefs:657] [[ -z /oracle/EPP ]] +epprd_rg:clvaryonvg(2.529):datavg[updatefs:657] [[ /oracle/EPP == None ]] +epprd_rg:clvaryonvg(2.529):datavg[updatefs:665] [[ /oracle/EPP == /oracle/EPP ]] +epprd_rg:clvaryonvg(2.529):datavg[updatefs:665] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:clvaryonvg(2.529):datavg[updatefs:685] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:clvaryonvg(2.529):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.529):datavg[updatefs:600] clodmget -q 'name = oraarchlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.533):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.533):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.533):datavg[updatefs:607] /usr/sbin/getlvcb -f oraarchlv +epprd_rg:clvaryonvg(2.550):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.550):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.550):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.550):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.550):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.550):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.550):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.550):datavg[updatefs:623] : Label and file system type from LVCB on disk for oraarchlv +epprd_rg:clvaryonvg(2.551):datavg[updatefs:625] getlvcb -T -A oraarchlv +epprd_rg:clvaryonvg(2.551):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.555):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.558):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.560):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest
+epprd_rg:clvaryonvg(2.573):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.573):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.573):datavg[updatefs:632] : Mount point in /etc/filesystems for oraarchlv +epprd_rg:clvaryonvg(2.576):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.574):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/oraarchlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.578):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.582):datavg[updatefs:634] fs_mount_point=/oracle/EPP/oraarch +epprd_rg:clvaryonvg(2.582):datavg[updatefs:637] : CuAt label attribute for oraarchlv +epprd_rg:clvaryonvg(2.582):datavg[updatefs:639] clodmget -q 'name = oraarchlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.585):datavg[updatefs:639] CuAt_label=/oracle/EPP/oraarch +epprd_rg:clvaryonvg(2.587):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.588):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.591):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.591):datavg[updatefs:657] [[ -z /oracle/EPP/oraarch ]] +epprd_rg:clvaryonvg(2.591):datavg[updatefs:657] [[ /oracle/EPP/oraarch == None ]] +epprd_rg:clvaryonvg(2.591):datavg[updatefs:665] [[ /oracle/EPP/oraarch == /oracle/EPP/oraarch ]] +epprd_rg:clvaryonvg(2.591):datavg[updatefs:665] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:clvaryonvg(2.591):datavg[updatefs:685] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:clvaryonvg(2.591):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.591):datavg[updatefs:600] clodmget -q 'name = sapdata1lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.595):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.595):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.595):datavg[updatefs:607] /usr/sbin/getlvcb -f sapdata1lv +epprd_rg:clvaryonvg(2.612):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.612):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.612):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.612):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.612):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.612):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.612):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.612):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapdata1lv +epprd_rg:clvaryonvg(2.613):datavg[updatefs:625] getlvcb -T -A sapdata1lv +epprd_rg:clvaryonvg(2.613):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.617):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.620):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.622):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.634):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.634):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.634):datavg[updatefs:632] : Mount point in /etc/filesystems for sapdata1lv +epprd_rg:clvaryonvg(2.636):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapdata1lv$' /etc/filesystems +epprd_rg:clvaryonvg(2.638):datavg[updatefs:634] head -1
+epprd_rg:clvaryonvg(2.640):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.643):datavg[updatefs:634] fs_mount_point=/oracle/EPP/sapdata1 +epprd_rg:clvaryonvg(2.643):datavg[updatefs:637] : CuAt label attribute for sapdata1lv +epprd_rg:clvaryonvg(2.643):datavg[updatefs:639] clodmget -q 'name = sapdata1lv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.647):datavg[updatefs:639] CuAt_label=/oracle/EPP/sapdata1 +epprd_rg:clvaryonvg(2.648):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.649):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.652):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.652):datavg[updatefs:657] [[ -z /oracle/EPP/sapdata1 ]] +epprd_rg:clvaryonvg(2.652):datavg[updatefs:657] [[ /oracle/EPP/sapdata1 == None ]] +epprd_rg:clvaryonvg(2.652):datavg[updatefs:665] [[ /oracle/EPP/sapdata1 == /oracle/EPP/sapdata1 ]] +epprd_rg:clvaryonvg(2.653):datavg[updatefs:665] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:clvaryonvg(2.653):datavg[updatefs:685] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:clvaryonvg(2.653):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.653):datavg[updatefs:600] clodmget -q 'name = sapdata2lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.656):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.656):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.656):datavg[updatefs:607] /usr/sbin/getlvcb -f sapdata2lv +epprd_rg:clvaryonvg(2.674):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.674):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.674):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.674):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.674):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.674):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.674):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.674):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapdata2lv +epprd_rg:clvaryonvg(2.675):datavg[updatefs:625] getlvcb -T -A sapdata2lv +epprd_rg:clvaryonvg(2.675):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.678):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.681):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.683):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.697):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.697):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.697):datavg[updatefs:632] : Mount point in /etc/filesystems for sapdata2lv +epprd_rg:clvaryonvg(2.698):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapdata2lv$' /etc/filesystems +epprd_rg:clvaryonvg(2.701):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.702):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.705):datavg[updatefs:634] fs_mount_point=/oracle/EPP/sapdata2 +epprd_rg:clvaryonvg(2.705):datavg[updatefs:637] : CuAt label attribute for sapdata2lv +epprd_rg:clvaryonvg(2.705):datavg[updatefs:639] clodmget -q 'name = sapdata2lv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.709):datavg[updatefs:639] CuAt_label=/oracle/EPP/sapdata2 
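updatefs:639-685 then cross-check three views of the same name: the CuAt label from ODM, the stanza found in /etc/filesystems, and the label read from the LVCB, after a wc -l guard that insists on exactly one CuAt label record. (The trace shows print -- CuAt_label without a $, either a script quirk or a capture artifact, so the guard as traced always counts one line; every comparison in this log matches anyway, so the repair branches past line 665 never fire.) A condensed sketch of the check, with the mismatch handling left as a placeholder because it is not exercised here:

    # Inside updatefs, after fs_mount_point and lvcb_label are known
    CuAt_label=$(clodmget -q "name = sapdata2lv and attribute = label" -f value -n CuAt)
    (( $(print -- "$CuAt_label" | wc -l) != 1 )) && return    # ambiguous ODM entry
    [[ -z $CuAt_label || $CuAt_label == None ]] && return     # no label recorded
    if [[ $CuAt_label != "$fs_mount_point" || $lvcb_label != "$fs_mount_point" ]]; then
        :    # hypothetical repair branch - never reached in this trace
    fi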
+epprd_rg:clvaryonvg(2.710):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.711):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.715):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.715):datavg[updatefs:657] [[ -z /oracle/EPP/sapdata2 ]] +epprd_rg:clvaryonvg(2.715):datavg[updatefs:657] [[ /oracle/EPP/sapdata2 == None ]] +epprd_rg:clvaryonvg(2.715):datavg[updatefs:665] [[ /oracle/EPP/sapdata2 == /oracle/EPP/sapdata2 ]] +epprd_rg:clvaryonvg(2.715):datavg[updatefs:665] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:clvaryonvg(2.715):datavg[updatefs:685] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:clvaryonvg(2.715):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.715):datavg[updatefs:600] clodmget -q 'name = sapdata3lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.718):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.718):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.718):datavg[updatefs:607] /usr/sbin/getlvcb -f sapdata3lv +epprd_rg:clvaryonvg(2.736):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.736):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.736):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.736):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.736):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.736):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.736):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.736):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapdata3lv +epprd_rg:clvaryonvg(2.737):datavg[updatefs:625] getlvcb -T -A sapdata3lv +epprd_rg:clvaryonvg(2.737):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.741):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.743):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.745):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.759):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.759):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.759):datavg[updatefs:632] : Mount point in /etc/filesystems for sapdata3lv +epprd_rg:clvaryonvg(2.761):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapdata3lv$' /etc/filesystems +epprd_rg:clvaryonvg(2.764):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.765):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.768):datavg[updatefs:634] fs_mount_point=/oracle/EPP/sapdata3 +epprd_rg:clvaryonvg(2.768):datavg[updatefs:637] : CuAt label attribute for sapdata3lv +epprd_rg:clvaryonvg(2.768):datavg[updatefs:639] clodmget -q 'name = sapdata3lv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.772):datavg[updatefs:639] CuAt_label=/oracle/EPP/sapdata3 +epprd_rg:clvaryonvg(2.773):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.774):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.778):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.778):datavg[updatefs:657] [[ -z /oracle/EPP/sapdata3 ]] +epprd_rg:clvaryonvg(2.778):datavg[updatefs:657] [[ /oracle/EPP/sapdata3 == None ]]
+epprd_rg:clvaryonvg(2.778):datavg[updatefs:665] [[ /oracle/EPP/sapdata3 == /oracle/EPP/sapdata3 ]] +epprd_rg:clvaryonvg(2.778):datavg[updatefs:665] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:clvaryonvg(2.778):datavg[updatefs:685] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:clvaryonvg(2.778):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.778):datavg[updatefs:600] clodmget -q 'name = sapdata4lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.781):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.781):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.781):datavg[updatefs:607] /usr/sbin/getlvcb -f sapdata4lv +epprd_rg:clvaryonvg(2.799):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.799):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.799):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.799):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.799):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.799):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.799):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.799):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapdata4lv +epprd_rg:clvaryonvg(2.800):datavg[updatefs:625] getlvcb -T -A sapdata4lv +epprd_rg:clvaryonvg(2.800):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.804):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.807):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.809):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.821):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.821):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.821):datavg[updatefs:632] : Mount point in /etc/filesystems for sapdata4lv +epprd_rg:clvaryonvg(2.823):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapdata4lv$' /etc/filesystems +epprd_rg:clvaryonvg(2.825):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.827):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.830):datavg[updatefs:634] fs_mount_point=/oracle/EPP/sapdata4 +epprd_rg:clvaryonvg(2.830):datavg[updatefs:637] : CuAt label attribute for sapdata4lv +epprd_rg:clvaryonvg(2.830):datavg[updatefs:639] clodmget -q 'name = sapdata4lv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.833):datavg[updatefs:639] CuAt_label=/oracle/EPP/sapdata4 +epprd_rg:clvaryonvg(2.835):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.836):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.839):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.839):datavg[updatefs:657] [[ -z /oracle/EPP/sapdata4 ]] +epprd_rg:clvaryonvg(2.839):datavg[updatefs:657] [[ /oracle/EPP/sapdata4 == None ]] +epprd_rg:clvaryonvg(2.839):datavg[updatefs:665] [[ /oracle/EPP/sapdata4 == /oracle/EPP/sapdata4 ]] +epprd_rg:clvaryonvg(2.839):datavg[updatefs:665] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:clvaryonvg(2.839):datavg[updatefs:685] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:clvaryonvg(2.840):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.840):datavg[updatefs:600] clodmget -q 'name = boardlv and attribute = type and value = raw' -f value -n CuAt
+epprd_rg:clvaryonvg(2.843):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.843):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.843):datavg[updatefs:607] /usr/sbin/getlvcb -f boardlv +epprd_rg:clvaryonvg(2.861):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.861):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.861):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.861):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.861):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.861):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.861):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.861):datavg[updatefs:623] : Label and file system type from LVCB on disk for boardlv +epprd_rg:clvaryonvg(2.862):datavg[updatefs:625] getlvcb -T -A boardlv +epprd_rg:clvaryonvg(2.862):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.865):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.868):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.870):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.883):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.883):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.883):datavg[updatefs:632] : Mount point in /etc/filesystems for boardlv +epprd_rg:clvaryonvg(2.885):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.884):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/boardlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.888):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.892):datavg[updatefs:634] fs_mount_point=/board_org +epprd_rg:clvaryonvg(2.892):datavg[updatefs:637] : CuAt label attribute for boardlv +epprd_rg:clvaryonvg(2.892):datavg[updatefs:639] clodmget -q 'name = boardlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.895):datavg[updatefs:639] CuAt_label=/board_org +epprd_rg:clvaryonvg(2.897):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.898):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.901):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.901):datavg[updatefs:657] [[ -z /board_org ]] +epprd_rg:clvaryonvg(2.901):datavg[updatefs:657] [[ /board_org == None ]] +epprd_rg:clvaryonvg(2.901):datavg[updatefs:665] [[ /board_org == /board_org ]] +epprd_rg:clvaryonvg(2.901):datavg[updatefs:665] [[ /board_org != /board_org ]] +epprd_rg:clvaryonvg(2.901):datavg[updatefs:685] [[ /board_org != /board_org ]] +epprd_rg:clvaryonvg(2.901):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.901):datavg[updatefs:600] clodmget -q 'name = origlogAlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.904):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.904):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.905):datavg[updatefs:607] /usr/sbin/getlvcb -f origlogAlv +epprd_rg:clvaryonvg(2.922):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.922):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.922):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.923):datavg[updatefs:609] (( 0 != 0 ))
+epprd_rg:clvaryonvg(2.923):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.923):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.923):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.923):datavg[updatefs:623] : Label and file system type from LVCB on disk for origlogAlv +epprd_rg:clvaryonvg(2.924):datavg[updatefs:625] getlvcb -T -A origlogAlv +epprd_rg:clvaryonvg(2.924):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.927):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.930):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.932):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.945):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.945):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.945):datavg[updatefs:632] : Mount point in /etc/filesystems for origlogAlv +epprd_rg:clvaryonvg(2.947):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/origlogAlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.949):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.951):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.954):datavg[updatefs:634] fs_mount_point=/oracle/EPP/origlogA +epprd_rg:clvaryonvg(2.954):datavg[updatefs:637] : CuAt label attribute for origlogAlv +epprd_rg:clvaryonvg(2.954):datavg[updatefs:639] clodmget -q 'name = origlogAlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.957):datavg[updatefs:639] CuAt_label=/oracle/EPP/origlogA +epprd_rg:clvaryonvg(2.959):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.960):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.963):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.963):datavg[updatefs:657] [[ -z /oracle/EPP/origlogA ]] +epprd_rg:clvaryonvg(2.963):datavg[updatefs:657] [[ /oracle/EPP/origlogA == None ]] +epprd_rg:clvaryonvg(2.963):datavg[updatefs:665] [[ /oracle/EPP/origlogA == /oracle/EPP/origlogA ]] +epprd_rg:clvaryonvg(2.963):datavg[updatefs:665] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:clvaryonvg(2.963):datavg[updatefs:685] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:clvaryonvg(2.963):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.963):datavg[updatefs:600] clodmget -q 'name = origlogBlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.967):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.967):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.967):datavg[updatefs:607] /usr/sbin/getlvcb -f origlogBlv +epprd_rg:clvaryonvg(2.985):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.985):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.985):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.985):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.985):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.985):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.985):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.985):datavg[updatefs:623] : Label and file system type from LVCB on disk for origlogBlv
+epprd_rg:clvaryonvg(2.986):datavg[updatefs:625] getlvcb -T -A origlogBlv +epprd_rg:clvaryonvg(2.986):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.989):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.993):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.994):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(3.007):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(3.007):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(3.007):datavg[updatefs:632] : Mount point in /etc/filesystems for origlogBlv +epprd_rg:clvaryonvg(3.009):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/origlogBlv$' /etc/filesystems +epprd_rg:clvaryonvg(3.011):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(3.013):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(3.016):datavg[updatefs:634] fs_mount_point=/oracle/EPP/origlogB +epprd_rg:clvaryonvg(3.016):datavg[updatefs:637] : CuAt label attribute for origlogBlv +epprd_rg:clvaryonvg(3.016):datavg[updatefs:639] clodmget -q 'name = origlogBlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(3.020):datavg[updatefs:639] CuAt_label=/oracle/EPP/origlogB +epprd_rg:clvaryonvg(3.021):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(3.022):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(3.026):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(3.026):datavg[updatefs:657] [[ -z /oracle/EPP/origlogB ]] +epprd_rg:clvaryonvg(3.026):datavg[updatefs:657] [[ /oracle/EPP/origlogB == None ]] +epprd_rg:clvaryonvg(3.026):datavg[updatefs:665] [[ /oracle/EPP/origlogB == /oracle/EPP/origlogB ]] +epprd_rg:clvaryonvg(3.026):datavg[updatefs:665] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:clvaryonvg(3.026):datavg[updatefs:685] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:clvaryonvg(3.026):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(3.026):datavg[updatefs:600] clodmget -q 'name = mirrlogAlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(3.029):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(3.029):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(3.029):datavg[updatefs:607] /usr/sbin/getlvcb -f mirrlogAlv +epprd_rg:clvaryonvg(3.047):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(3.047):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(3.047):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(3.047):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(3.047):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(3.047):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(3.047):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(3.047):datavg[updatefs:623] : Label and file system type from LVCB on disk for mirrlogAlv +epprd_rg:clvaryonvg(3.048):datavg[updatefs:625] getlvcb -T -A mirrlogAlv +epprd_rg:clvaryonvg(3.048):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(3.052):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(3.055):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(3.057):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest
+epprd_rg:clvaryonvg(3.070):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(3.070):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(3.070):datavg[updatefs:632] : Mount point in /etc/filesystems for mirrlogAlv +epprd_rg:clvaryonvg(3.071):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/mirrlogAlv$' /etc/filesystems +epprd_rg:clvaryonvg(3.074):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(3.075):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(3.079):datavg[updatefs:634] fs_mount_point=/oracle/EPP/mirrlogA +epprd_rg:clvaryonvg(3.079):datavg[updatefs:637] : CuAt label attribute for mirrlogAlv +epprd_rg:clvaryonvg(3.079):datavg[updatefs:639] clodmget -q 'name = mirrlogAlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(3.082):datavg[updatefs:639] CuAt_label=/oracle/EPP/mirrlogA +epprd_rg:clvaryonvg(3.084):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(3.085):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(3.088):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(3.088):datavg[updatefs:657] [[ -z /oracle/EPP/mirrlogA ]] +epprd_rg:clvaryonvg(3.088):datavg[updatefs:657] [[ /oracle/EPP/mirrlogA == None ]] +epprd_rg:clvaryonvg(3.088):datavg[updatefs:665] [[ /oracle/EPP/mirrlogA == /oracle/EPP/mirrlogA ]] +epprd_rg:clvaryonvg(3.088):datavg[updatefs:665] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:clvaryonvg(3.088):datavg[updatefs:685] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:clvaryonvg(3.088):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(3.088):datavg[updatefs:600] clodmget -q 'name = mirrlogBlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(3.091):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(3.091):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(3.091):datavg[updatefs:607] /usr/sbin/getlvcb -f mirrlogBlv +epprd_rg:clvaryonvg(3.109):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(3.109):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(3.109):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(3.109):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(3.109):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(3.109):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(3.109):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(3.109):datavg[updatefs:623] : Label and file system type from LVCB on disk for mirrlogBlv +epprd_rg:clvaryonvg(3.110):datavg[updatefs:625] getlvcb -T -A mirrlogBlv +epprd_rg:clvaryonvg(3.111):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(3.114):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(3.117):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(3.119):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(3.131):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(3.131):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(3.131):datavg[updatefs:632] : Mount point in /etc/filesystems for mirrlogBlv +epprd_rg:clvaryonvg(3.133):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/mirrlogBlv$' /etc/filesystems +epprd_rg:clvaryonvg(3.135):datavg[updatefs:634] head -1 
+epprd_rg:clvaryonvg(3.137):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(3.140):datavg[updatefs:634] fs_mount_point=/oracle/EPP/mirrlogB +epprd_rg:clvaryonvg(3.140):datavg[updatefs:637] : CuAt label attribute for mirrlogBlv +epprd_rg:clvaryonvg(3.140):datavg[updatefs:639] clodmget -q 'name = mirrlogBlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(3.144):datavg[updatefs:639] CuAt_label=/oracle/EPP/mirrlogB +epprd_rg:clvaryonvg(3.146):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(3.147):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(3.150):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(3.150):datavg[updatefs:657] [[ -z /oracle/EPP/mirrlogB ]] +epprd_rg:clvaryonvg(3.150):datavg[updatefs:657] [[ /oracle/EPP/mirrlogB == None ]] +epprd_rg:clvaryonvg(3.150):datavg[updatefs:665] [[ /oracle/EPP/mirrlogB == /oracle/EPP/mirrlogB ]] +epprd_rg:clvaryonvg(3.150):datavg[updatefs:665] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:clvaryonvg(3.150):datavg[updatefs:685] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:clvaryonvg(3.150):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(3.150):datavg[updatefs:600] clodmget -q 'name = epprdaloglv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(3.153):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(3.153):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(3.153):datavg[updatefs:607] /usr/sbin/getlvcb -f epprdaloglv +epprd_rg:clvaryonvg(3.171):datavg[updatefs:607] fs_info=' ' +epprd_rg:clvaryonvg(3.171):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(3.171):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(3.171):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(3.171):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(3.171):datavg[updatefs:618] [[ -z ' ' ]] +epprd_rg:clvaryonvg(3.171):datavg[updatefs:618] [[ ' ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(3.171):datavg[updatefs:620] continue +epprd_rg:clvaryonvg(3.171):datavg[1641] : At this point, the volume should be varied on, so get the current +epprd_rg:clvaryonvg(3.171):datavg[1642] : timestamp if needed +epprd_rg:clvaryonvg(3.171):datavg[1644] vgdatimestamps +epprd_rg:clvaryonvg(3.171):datavg[vgdatimestamps:201] PS4_FUNC=vgdatimestamps +epprd_rg:clvaryonvg(3.171):datavg[vgdatimestamps:201] typeset PS4_FUNC +epprd_rg:clvaryonvg(3.171):datavg[vgdatimestamps:202] [[ high == high ]] +epprd_rg:clvaryonvg(3.171):datavg[vgdatimestamps:202] set -x +epprd_rg:clvaryonvg(3.171):datavg[vgdatimestamps:203] set -u +epprd_rg:clvaryonvg(3.171):datavg[vgdatimestamps:206] : See what timestamp LVM has recorded from the last time it checked +epprd_rg:clvaryonvg(3.171):datavg[vgdatimestamps:207] : the disks +epprd_rg:clvaryonvg(3.171):datavg[vgdatimestamps:209] /usr/sbin/getlvodm -T 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(3.172):datavg[vgdatimestamps:209] 2> /dev/null +epprd_rg:clvaryonvg(3.175):datavg[vgdatimestamps:209] TS_FROM_ODM=63d4fdff1617c6e2 +epprd_rg:clvaryonvg(3.175):datavg[vgdatimestamps:212] : Check to see if HACMP is maintaining a timestamp for this volume group +epprd_rg:clvaryonvg(3.175):datavg[vgdatimestamps:213] : Needed for some older volume groups +epprd_rg:clvaryonvg(3.175):datavg[vgdatimestamps:215] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.tstamp ]] +epprd_rg:clvaryonvg(3.175):datavg[vgdatimestamps:234] : Get the time stamp from the actual 
disk +epprd_rg:clvaryonvg(3.175):datavg[vgdatimestamps:236] clvgdats /dev/datavg +epprd_rg:clvaryonvg(3.176):datavg[vgdatimestamps:236] 2> /dev/null +epprd_rg:clvaryonvg(3.185):datavg[vgdatimestamps:236] TS_FROM_DISK=63d4fdff1617c6e2 +epprd_rg:clvaryonvg(3.185):datavg[vgdatimestamps:237] clvgdats_rc=0 +epprd_rg:clvaryonvg(3.185):datavg[vgdatimestamps:238] (( 0 != 0 )) +epprd_rg:clvaryonvg(3.185):datavg[vgdatimestamps:247] [[ -z 63d4fdff1617c6e2 ]] +epprd_rg:clvaryonvg(3.185):datavg[1645] [[ -z 63d4fdff1617c6e2 ]] +epprd_rg:clvaryonvg(3.185):datavg[1656] : Finally, leave the volume in the requested state - on or off +epprd_rg:clvaryonvg(3.185):datavg[1658] [[ FALSE == TRUE ]] +epprd_rg:clvaryonvg(3.185):datavg[1665] (( 0 == 0 )) +epprd_rg:clvaryonvg(3.185):datavg[1668] : Synchronize time stamps globally +epprd_rg:clvaryonvg(3.185):datavg[1670] cl_update_vg_odm_ts -o datavg +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[77] version=1.13 +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[121] o_flag='' +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[122] f_flag='' +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[123] getopts :of option +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[126] : Local timestamps should be good, since volume group was +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[127] : just varyied on or off +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[128] o_flag=TRUE +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[123] getopts :of option +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[142] shift 1 +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[144] vg_name=datavg +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[145] [[ -z datavg ]] +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[151] shift +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[152] node_list='' +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[153] /usr/es/sbin/cluster/utilities/cl_get_path all +epprd_rg:cl_update_vg_odm_ts(0.004):datavg[153] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin +epprd_rg:cl_update_vg_odm_ts(0.004):datavg[155] [[ -z '' ]] +epprd_rg:cl_update_vg_odm_ts(0.004):datavg[158] : Check to see if this update is necessary - some LVM levels automatically +epprd_rg:cl_update_vg_odm_ts(0.004):datavg[159] : update volume group timestamps clusterwide. 
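The vgdatimestamps step above compares the timestamp LVM last cached in the ODM (getlvodm -T, keyed by VGID) against the one clvgdats reads from the VGDA on disk; equal values (63d4fdff1617c6e2 here) mean the local ODM is current. A minimal sketch of the same comparison, assuming a PowerHA node where clvgdats is on the PATH and reusing the VGID from this trace as a placeholder:

    #!/bin/ksh
    vg=datavg                                         # placeholder VG name
    vgid=00c44af100004b00000001851e9dc053             # datavg's VGID in this trace
    ts_odm=$(/usr/sbin/getlvodm -T $vgid 2>/dev/null) # timestamp cached in the ODM
    ts_disk=$(clvgdats /dev/$vg 2>/dev/null)          # timestamp on the VGDA itself
    if [[ -z $ts_disk ]]
    then
        print "cannot read the VGDA timestamp for $vg"
    elif [[ $ts_odm != "$ts_disk" ]]
    then
        print "$vg: ODM timestamp $ts_odm is stale, disk has $ts_disk"
    fi
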
+epprd_rg:cl_update_vg_odm_ts(0.004):datavg[163] instfix -iqk IV74100 +epprd_rg:cl_update_vg_odm_ts(0.005):datavg[163] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.012):datavg[164] instfix -iqk IV74883 +epprd_rg:cl_update_vg_odm_ts(0.013):datavg[164] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.019):datavg[165] instfix -iqk IV74698 +epprd_rg:cl_update_vg_odm_ts(0.020):datavg[165] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.027):datavg[166] instfix -iqk IV74246 +epprd_rg:cl_update_vg_odm_ts(0.027):datavg[166] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.034):datavg[174] emgr -l -L IV74883 +epprd_rg:cl_update_vg_odm_ts(0.035):datavg[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.309):datavg[174] emgr -l -L IV74698 +epprd_rg:cl_update_vg_odm_ts(0.310):datavg[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.582):datavg[174] emgr -l -L IV74246 +epprd_rg:cl_update_vg_odm_ts(0.583):datavg[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.852):datavg[183] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_update_vg_odm_ts(0.852):datavg[184] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_update_vg_odm_ts(0.852):datavg[185] : 99.99.999.999 +epprd_rg:cl_update_vg_odm_ts(0.852):datavg[187] typeset -li V R M F +epprd_rg:cl_update_vg_odm_ts(0.852):datavg[188] typeset -Z2 V +epprd_rg:cl_update_vg_odm_ts(0.852):datavg[189] typeset -Z2 R +epprd_rg:cl_update_vg_odm_ts(0.852):datavg[190] typeset -Z3 M +epprd_rg:cl_update_vg_odm_ts(0.852):datavg[191] typeset -Z3 F +epprd_rg:cl_update_vg_odm_ts(0.852):datavg[192] lvm_lvl6=601008015 +epprd_rg:cl_update_vg_odm_ts(0.852):datavg[192] typeset -li lvm_lvl6 +epprd_rg:cl_update_vg_odm_ts(0.852):datavg[194] lvm_lvl7=701003046 +epprd_rg:cl_update_vg_odm_ts(0.852):datavg[194] typeset -li lvm_lvl7 +epprd_rg:cl_update_vg_odm_ts(0.852):datavg[195] VRMF=0 +epprd_rg:cl_update_vg_odm_ts(0.852):datavg[195] typeset -li VRMF +epprd_rg:cl_update_vg_odm_ts(0.853):datavg[198] : Here try and figure out what level of LVM is installed +epprd_rg:cl_update_vg_odm_ts(0.853):datavg[200] lslpp -lcqOr bos.rte.lvm +epprd_rg:cl_update_vg_odm_ts(0.855):datavg[200] cut -f3 -d: +epprd_rg:cl_update_vg_odm_ts(0.854):datavg[200] read V R M F +epprd_rg:cl_update_vg_odm_ts(0.855):datavg[200] IFS=. +epprd_rg:cl_update_vg_odm_ts(0.856):datavg[201] VRMF=0702005101 +epprd_rg:cl_update_vg_odm_ts(0.856):datavg[203] (( 7 == 6 && 702005101 >= 601008015 )) +epprd_rg:cl_update_vg_odm_ts(0.856):datavg[204] (( 702005101 >= 701003046 )) +epprd_rg:cl_update_vg_odm_ts(0.856):datavg[207] : LVM at a level in which timestamp update is unnecessary +epprd_rg:cl_update_vg_odm_ts(0.856):datavg[209] return 0 +epprd_rg:clvaryonvg(4.046):datavg[1674] : On successful varyon, clean up any files used to track errors with +epprd_rg:clvaryonvg(4.046):datavg[1675] : this volume group +epprd_rg:clvaryonvg(4.046):datavg[1677] rm -f /usr/es/sbin/cluster/etc/vg/datavg.desc /usr/es/sbin/cluster/etc/vg/datavg.replay /usr/es/sbin/cluster/etc/vg/datavg.perms /usr/es/sbin/cluster/etc/vg/datavg.tstamp /usr/es/sbin/cluster/etc/vg/datavg.fail +epprd_rg:clvaryonvg(4.049):datavg[1680] : Note that a sync has not been done on the volume group at this point. +epprd_rg:clvaryonvg(4.049):datavg[1681] : A sync is kicked off in cl_sync_vgs, once any filesystem mounts are +epprd_rg:clvaryonvg(4.049):datavg[1682] : complete. 
A sync at this time would interfere with the mounts +epprd_rg:clvaryonvg(4.049):datavg[1685] return 0 +epprd_rg:cl_activate_vgs(4.140):datavg[vgs_chk:103] ERRMSG=$'cl_set_vg_fence_height[126]: version @(#)10\t1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37\ncl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY)\ncl_set_vg_fence_height[214]: read(datavg, 16)\ncl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)\ncl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0))' +epprd_rg:cl_activate_vgs(4.141):datavg[vgs_chk:104] RC=0 +epprd_rg:cl_activate_vgs(4.141):datavg[vgs_chk:107] (( 0 == 1 || 0 == 20 )) +epprd_rg:cl_activate_vgs(4.141):datavg[vgs_chk:115] : exit status of clvaryonvg -n datavg: 0 +epprd_rg:cl_activate_vgs(4.141):datavg[vgs_chk:117] [[ -n $'cl_set_vg_fence_height[126]: version @(#)10\t1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37\ncl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY)\ncl_set_vg_fence_height[214]: read(datavg, 16)\ncl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)\ncl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0))' ]] +epprd_rg:cl_activate_vgs(4.141):datavg[vgs_chk:117] (( 0 != 1 )) +epprd_rg:cl_activate_vgs(4.141):datavg[vgs_chk:119] cl_echo 286 $'cl_activate_vgs: Successful clvaryonvg of datavg with message cl_set_vg_fence_height[126]: version @(#)10\t1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37\ncl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY)\ncl_set_vg_fence_height[214]: read(datavg, 16)\ncl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)\ncl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0)).' 
cl_activate_vgs datavg 'cl_set_vg_fence_height[126]:' version '@(#)10' 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 'cl_set_vg_fence_height[180]:' 'open(/usr/es/sbin/cluster/etc/vg/datavg.uuid,' 'O_RDONLY)' 'cl_set_vg_fence_height[214]:' 'read(datavg,' '16)' 'cl_set_vg_fence_height[237]:' 'close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)' 'cl_set_vg_fence_height[265]:' 'sfwSetFenceGroup(vg=datavg' uuid=ec2db4422261eae02091227fb9e53c88 height='rw(0))' Jan 28 2023 19:50:43cl_activate_vgs: Successful clvaryonvg of datavg with message cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0)).+epprd_rg:cl_activate_vgs(4.160):datavg[vgs_chk:123] [[ 0 != 0 ]] +epprd_rg:cl_activate_vgs(4.160):datavg[vgs_chk:127] amlog_trace '' 'Activating Volume Group|datavg' +epprd_rg:cl_activate_vgs(4.160):datavg[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_vgs(4.161):datavg[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_vgs(4.186):datavg[amlog_trace:319] cltime +epprd_rg:cl_activate_vgs(4.189):datavg[amlog_trace:319] DATE=2023-01-28T19:50:43.252836 +epprd_rg:cl_activate_vgs(4.189):datavg[amlog_trace:320] echo '|2023-01-28T19:50:43.252836|INFO: Activating Volume Group|datavg' +epprd_rg:cl_activate_vgs(4.189):datavg[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_vgs(4.189):datavg[vgs_chk:132] echo datavg 0 +epprd_rg:cl_activate_vgs(4.189):datavg[vgs_chk:132] 1>> /tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs(4.189):datavg[vgs_chk:133] return 0 +epprd_rg:cl_activate_vgs:datavg[vgs_list:198] unset PS4_LOOP PS4_TIMER +epprd_rg:cl_activate_vgs[304] wait +epprd_rg:cl_activate_vgs[310] ALLNOERRVGS=All_nonerror_volume_groups +epprd_rg:cl_activate_vgs[311] cl_RMupdate resource_up All_nonerror_volume_groups cl_activate_vgs 2023-01-28T19:50:43.276234 2023-01-28T19:50:43.280807 +epprd_rg:cl_activate_vgs[318] [[ -f /tmp/_activate_vgs.tmp ]] +epprd_rg:cl_activate_vgs[320] grep ' 1' /tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs[329] rm -f /tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs[332] exit 0 +epprd_rg:process_resources[process_volume_groups:2584] RC=0 +epprd_rg:process_resources[process_volume_groups:2585] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[process_volume_groups:2598] (( 0 != 0 )) +epprd_rg:process_resources[process_volume_groups:2627] return 0 +epprd_rg:process_resources[process_volume_groups_main:2556] STAT=0 +epprd_rg:process_resources[process_volume_groups_main:2559] return 0 +epprd_rg:process_resources[3572] RC=0 +epprd_rg:process_resources[3573] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:50:43.299788 clrgpa +epprd_rg:clRGPA[+55] exit 0 
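One step above deserves a gloss: cl_update_vg_odm_ts concluded "timestamp update unnecessary" by padding the installed bos.rte.lvm V.R.M.F level to fixed widths (2+2+3+3 digits) and comparing the result numerically against 701003046, the first 7.1 level at which LVM itself propagates VG timestamps clusterwide. A sketch of that padding trick, mirroring the traced typeset attributes:

    #!/bin/ksh
    typeset -li V R M F              # numeric fields of the fileset level
    typeset -Z2 V                    # zero-fill version to 2 digits
    typeset -Z2 R                    # zero-fill release to 2 digits
    typeset -Z3 M                    # zero-fill modification to 3 digits
    typeset -Z3 F                    # zero-fill fix to 3 digits
    typeset -li lvm_lvl7=701003046 VRMF=0
    # Installed level of bos.rte.lvm, e.g. 7.2.5.101
    lslpp -lcqOr bos.rte.lvm | cut -f3 -d: | IFS=. read V R M F
    VRMF=$V$R$M$F                    # 7.2.5.101 -> 0702005101
    (( VRMF >= lvm_lvl7 )) && print "LVM already syncs VG timestamps clusterwide"
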
+epprd_rg:process_resources[3329] eval JOB_TYPE=LOGREDO ACTION=ACQUIRE VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' +epprd_rg:process_resources[1] JOB_TYPE=LOGREDO +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] VOLUME_GROUPS=datavg +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ LOGREDO == RELEASE ]] +epprd_rg:process_resources[3360] [[ LOGREDO == ONLINE ]] +epprd_rg:process_resources[3634] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[3635] logredo_volume_groups +epprd_rg:process_resources[logredo_volume_groups:2745] PS4_FUNC=logredo_volume_groups +epprd_rg:process_resources[logredo_volume_groups:2745] typeset PS4_FUNC +epprd_rg:process_resources(5.162)[logredo_volume_groups:2746] PS4_TIMER=true +epprd_rg:process_resources(5.163)[logredo_volume_groups:2746] typeset PS4_TIMER +epprd_rg:process_resources(5.163)[logredo_volume_groups:2747] [[ high == high ]] +epprd_rg:process_resources(5.163)[logredo_volume_groups:2747] set -x +epprd_rg:process_resources(5.163)[logredo_volume_groups:2749] TMP_FILE=/var/hacmp/log/.process_resources_logredo.26542458 +epprd_rg:process_resources(5.163)[logredo_volume_groups:2749] export TMP_FILE +epprd_rg:process_resources(5.163)[logredo_volume_groups:2750] rm -f '/var/hacmp/log/.process_resources_logredo*' +epprd_rg:process_resources(5.166)[logredo_volume_groups:2752] STAT=0 +epprd_rg:process_resources(5.166)[logredo_volume_groups:2755] export GROUPNAME +epprd_rg:process_resources(5.167)[logredo_volume_groups:2757] get_list_head datavg +epprd_rg:process_resources(5.167)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(5.167)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(5.167)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(5.167)[get_list_head:60] set -x +epprd_rg:process_resources(5.168)[get_list_head:61] echo datavg +epprd_rg:process_resources(5.168)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(5.168)[get_list_head:61] IFS=: +epprd_rg:process_resources(5.169)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(5.169)[get_list_head:62] echo datavg +epprd_rg:process_resources(5.167)[logredo_volume_groups:2757] read LIST_OF_VOLUME_GROUPS_FOR_RG +epprd_rg:process_resources(5.173)[logredo_volume_groups:2758] get_list_tail datavg +epprd_rg:process_resources(5.173)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(5.173)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(5.174)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(5.174)[get_list_tail:68] set -x +epprd_rg:process_resources(5.175)[get_list_tail:69] echo datavg +epprd_rg:process_resources(5.175)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(5.175)[get_list_tail:69] IFS=: +epprd_rg:process_resources(5.175)[get_list_tail:70] echo +epprd_rg:process_resources(5.173)[logredo_volume_groups:2758] read VOLUME_GROUPS +epprd_rg:process_resources(5.175)[logredo_volume_groups:2761] : Run logredo on all JFS/JFS2 log devices to assure FS consistency 
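logredo_volume_groups will now inventory the VG and, after the mounted-elsewhere checks below, replay the JFS2 log on each log device it finds, so the file systems are consistent before any fsck or mount. The eventual replay amounts to this sketch, assuming AIX's /usr/sbin/logredo and the single external log device this trace discovers:

    #!/bin/ksh
    # Replay the JFS2 transaction log so on-disk metadata is consistent.
    # A real pass loops over the deduplicated logdevs list; this trace
    # ends up with just /dev/epprdaloglv in it.
    for logdev in /dev/epprdaloglv
    do
        if /usr/sbin/logredo $logdev
        then
            print "logredo completed on $logdev"
        else
            print "logredo failed on $logdev; a manual fsck may be needed"
        fi
    done
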
+epprd_rg:process_resources(5.175)[logredo_volume_groups:2763] ALL_LVs='' +epprd_rg:process_resources(5.175)[logredo_volume_groups:2764] lv_all='' +epprd_rg:process_resources(5.175)[logredo_volume_groups:2765] mount_fs='' +epprd_rg:process_resources(5.175)[logredo_volume_groups:2766] fsck_check='' +epprd_rg:process_resources(5.175)[logredo_volume_groups:2767] MOUNTGUARD='' +epprd_rg:process_resources(5.175)[logredo_volume_groups:2768] FMMOUNT_OUT='' +epprd_rg:process_resources(5.175)[logredo_volume_groups:2769] FMMOUNT='' +epprd_rg:process_resources(5.177)[logredo_volume_groups:2772] tail +3 +epprd_rg:process_resources(5.177)[logredo_volume_groups:2772] lsvg -lL datavg +epprd_rg:process_resources(5.177)[logredo_volume_groups:2772] LC_ALL=C +epprd_rg:process_resources(5.179)[logredo_volume_groups:2772] 1>> /var/hacmp/log/.process_resources_logredo.26542458 +epprd_rg:process_resources(5.200)[logredo_volume_groups:2774] awk '{print $1}' +epprd_rg:process_resources(5.200)[logredo_volume_groups:2774] cat /var/hacmp/log/.process_resources_logredo.26542458 +epprd_rg:process_resources(5.205)[logredo_volume_groups:2774] ALL_LVs=$'epprdaloglv\nsaplv\nsapmntlv\noraclelv\nepplv\noraarchlv\nsapdata1lv\nsapdata2lv\nsapdata3lv\nsapdata4lv\nboardlv\noriglogAlv\noriglogBlv\nmirrlogAlv\nmirrlogBlv' +epprd_rg:process_resources(5.206)[logredo_volume_groups:2777] : Verify if any of the file system associated with volume group datavg +epprd_rg:process_resources(5.206)[logredo_volume_groups:2778] : is already mounted anywhere else in the cluster. +epprd_rg:process_resources(5.206)[logredo_volume_groups:2779] : If it is already mounted somewhere else, we dont want to continue +epprd_rg:process_resources(5.206)[logredo_volume_groups:2780] : here to avoid data corruption. +epprd_rg:process_resources(5.208)[logredo_volume_groups:2782] awk '{print $1}' +epprd_rg:process_resources(5.208)[logredo_volume_groups:2782] cat /var/hacmp/log/.process_resources_logredo.26542458 +epprd_rg:process_resources(5.208)[logredo_volume_groups:2782] grep -v N/A +epprd_rg:process_resources(5.213)[logredo_volume_groups:2782] lv_all=$'saplv\nsapmntlv\noraclelv\nepplv\noraarchlv\nsapdata1lv\nsapdata2lv\nsapdata3lv\nsapdata4lv\nboardlv\noriglogAlv\noriglogBlv\nmirrlogAlv\nmirrlogBlv' +epprd_rg:process_resources(5.213)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.213)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.215)[logredo_volume_groups:2789] lsfs -qc saplv +epprd_rg:process_resources(5.216)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.216)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.217)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/saplv' was found in /etc/filesystems. +epprd_rg:process_resources(5.218)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.222)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.222)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.222)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
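The inventory at the top of this block comes from lsvg -lL (-L avoids blocking on a locked VG), with tail +3 dropping the two header lines: ALL_LVs collects every LV in the VG, and lv_all keeps only those whose mount point column is not N/A, which filters out the log LV. A sketch of the same harvest, with datavg as the placeholder VG:

    #!/bin/ksh
    vg=datavg                                  # placeholder VG name
    # Every LV name in the VG: skip the two-line lsvg header.
    ALL_LVs=$(LC_ALL=C lsvg -lL $vg | tail +3 | awk '{print $1}')
    # Only LVs that carry a file system (mount point is not N/A).
    lv_all=$(LC_ALL=C lsvg -lL $vg | tail +3 | grep -v N/A | awk '{print $1}')
    print "file system LVs:" $lv_all
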
+epprd_rg:process_resources(5.222)[logredo_volume_groups:2795] fsdb saplv +epprd_rg:process_resources(5.223)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.227)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.229)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.229)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.229)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.234)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.234)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.234)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.234)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.234)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.236)[logredo_volume_groups:2789] lsfs -qc sapmntlv +epprd_rg:process_resources(5.237)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.237)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.237)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapmntlv' was found in /etc/filesystems. +epprd_rg:process_resources(5.239)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.243)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.243)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.243)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.243)[logredo_volume_groups:2795] fsdb sapmntlv +epprd_rg:process_resources(5.244)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.247)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.249)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.250)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.250)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.255)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.255)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.255)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.255)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.255)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.257)[logredo_volume_groups:2789] lsfs -qc oraclelv +epprd_rg:process_resources(5.257)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.258)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.258)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/oraclelv' was found in /etc/filesystems. +epprd_rg:process_resources(5.260)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.264)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.264)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:process_resources(5.264)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.264)[logredo_volume_groups:2795] fsdb oraclelv +epprd_rg:process_resources(5.265)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.268)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.270)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.271)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.271)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.276)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.276)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.276)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.276)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.276)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.278)[logredo_volume_groups:2789] lsfs -qc epplv +epprd_rg:process_resources(5.278)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.279)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.279)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/epplv' was found in /etc/filesystems. +epprd_rg:process_resources(5.280)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.284)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.284)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.284)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.285)[logredo_volume_groups:2795] fsdb epplv +epprd_rg:process_resources(5.286)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.289)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.291)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.291)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.291)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.296)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.296)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.296)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.297)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.297)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.299)[logredo_volume_groups:2789] lsfs -qc oraarchlv +epprd_rg:process_resources(5.299)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.299)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.300)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/oraarchlv' was found in /etc/filesystems. 
+epprd_rg:process_resources(5.301)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.305)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.305)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.305)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.305)[logredo_volume_groups:2795] fsdb oraarchlv +epprd_rg:process_resources(5.306)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.309)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.312)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.312)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.312)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.317)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.317)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.317)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.317)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.317)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.319)[logredo_volume_groups:2789] lsfs -qc sapdata1lv +epprd_rg:process_resources(5.320)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.320)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.320)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapdata1lv' was found in /etc/filesystems. +epprd_rg:process_resources(5.322)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.326)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.326)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.326)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.326)[logredo_volume_groups:2795] fsdb sapdata1lv +epprd_rg:process_resources(5.327)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.331)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.333)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.333)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.333)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.338)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.338)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.338)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.338)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.338)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
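A quirk worth flagging in the loop above: lsfs is handed the bare LV name, so it finds no /etc/filesystems record (hence the repeated "No record matching" stderr lines) and MOUNTGUARD always comes back empty. Queried by mount point, lsfs -q does report the guard state; a sketch under that assumption, with a placeholder mount point and the caveat that the exact -q output wording varies by AIX level:

    #!/bin/ksh
    fs=/oracle/EPP/sapdata2     # placeholder mount point present in /etc/filesystems
    # For JFS2, lsfs -q appends superblock details, including MountGuard.
    if LC_ALL=C lsfs -q $fs 2>/dev/null | grep -q 'MountGuard: yes'
    then
        print "$fs is guarded against concurrent mounts"
    else
        print "$fs has no MountGuard set (or is not a JFS2 file system)"
    fi
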
+epprd_rg:process_resources(5.340)[logredo_volume_groups:2789] lsfs -qc sapdata2lv +epprd_rg:process_resources(5.341)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.341)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.341)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapdata2lv' was found in /etc/filesystems. +epprd_rg:process_resources(5.343)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.347)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.347)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.347)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.347)[logredo_volume_groups:2795] fsdb sapdata2lv +epprd_rg:process_resources(5.348)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.351)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.354)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.354)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.354)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.359)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.359)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.359)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.360)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.360)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.362)[logredo_volume_groups:2789] lsfs -qc sapdata3lv +epprd_rg:process_resources(5.362)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.362)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.363)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapdata3lv' was found in /etc/filesystems. +epprd_rg:process_resources(5.364)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.368)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.368)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.368)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
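The FM_MOUNT probe drives fsdb in batch: the here-document feeds it su (display superblock) and q (quit), and the captured output is scanned for the FM_MOUNT flag, which JFS2 keeps set while the file system is cleanly mounted somewhere. In this trace every probe comes back empty, so nothing is mounted elsewhere and the acquire can proceed. A sketch of the probe against a placeholder device:

    #!/bin/ksh
    dev=sapdata3lv              # placeholder; the trace passes bare LV names to fsdb
    out=$(fsdb $dev <<EOF
    su
    q
    EOF
    )
    if print -- "$out" | grep -qw FM_MOUNT
    then
        print "superblock of $dev carries FM_MOUNT: mounted on some node"
    fi
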
+epprd_rg:process_resources(5.368)[logredo_volume_groups:2795] fsdb sapdata3lv +epprd_rg:process_resources(5.370)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.373)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.375)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.375)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.375)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.380)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.380)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.380)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.380)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.380)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.383)[logredo_volume_groups:2789] lsfs -qc sapdata4lv +epprd_rg:process_resources(5.383)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.383)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.384)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapdata4lv' was found in /etc/filesystems. +epprd_rg:process_resources(5.385)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.389)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.389)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.389)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.389)[logredo_volume_groups:2795] fsdb sapdata4lv +epprd_rg:process_resources(5.390)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.394)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.396)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.396)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.396)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.401)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.401)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.401)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.401)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.401)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.403)[logredo_volume_groups:2789] lsfs -qc boardlv +epprd_rg:process_resources(5.403)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.404)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.404)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/boardlv' was found in /etc/filesystems. +epprd_rg:process_resources(5.406)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.410)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.410)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:process_resources(5.410)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.410)[logredo_volume_groups:2795] fsdb boardlv +epprd_rg:process_resources(5.411)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.414)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.416)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.417)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.417)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.422)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.422)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.422)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.422)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.422)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.424)[logredo_volume_groups:2789] lsfs -qc origlogAlv +epprd_rg:process_resources(5.424)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.425)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.425)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/origlogAlv' was found in /etc/filesystems. +epprd_rg:process_resources(5.427)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.431)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.431)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.431)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.431)[logredo_volume_groups:2795] fsdb origlogAlv +epprd_rg:process_resources(5.432)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.435)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.437)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.438)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.438)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.443)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.443)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.443)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.443)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.443)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.445)[logredo_volume_groups:2789] lsfs -qc origlogBlv +epprd_rg:process_resources(5.445)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.445)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.446)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/origlogBlv' was found in /etc/filesystems. 
+epprd_rg:process_resources(5.447)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.451)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.451)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.451)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.451)[logredo_volume_groups:2795] fsdb origlogBlv +epprd_rg:process_resources(5.453)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.456)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.458)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.459)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.459)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.464)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.464)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.464)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.464)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.464)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.466)[logredo_volume_groups:2789] lsfs -qc mirrlogAlv +epprd_rg:process_resources(5.466)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.466)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.467)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/mirrlogAlv' was found in /etc/filesystems. +epprd_rg:process_resources(5.468)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.472)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.472)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.472)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.472)[logredo_volume_groups:2795] fsdb mirrlogAlv +epprd_rg:process_resources(5.474)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.477)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.479)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.479)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.479)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.484)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.484)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.484)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.485)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.485)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
+epprd_rg:process_resources(5.487)[logredo_volume_groups:2789] lsfs -qc mirrlogBlv +epprd_rg:process_resources(5.487)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.487)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.488)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/mirrlogBlv' was found in /etc/filesystems. +epprd_rg:process_resources(5.489)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.493)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.493)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.493)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.493)[logredo_volume_groups:2795] fsdb mirrlogBlv +epprd_rg:process_resources(5.494)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.498)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.500)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.500)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.500)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.505)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.505)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.505)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.505)[logredo_volume_groups:2814] comm_failure='' +epprd_rg:process_resources(5.505)[logredo_volume_groups:2815] rc_mount='' +epprd_rg:process_resources(5.505)[logredo_volume_groups:2816] [[ -n '' ]] +epprd_rg:process_resources(5.505)[logredo_volume_groups:2851] logdevs='' +epprd_rg:process_resources(5.505)[logredo_volume_groups:2852] HAVE_GEO='' +epprd_rg:process_resources(5.505)[logredo_volume_groups:2853] lslpp -l 'hageo.*' +epprd_rg:process_resources(5.506)[logredo_volume_groups:2853] 1> /dev/null 2>& 1 +epprd_rg:process_resources(5.509)[logredo_volume_groups:2854] lslpp -l 'geoRM.*' +epprd_rg:process_resources(5.510)[logredo_volume_groups:2854] 1> /dev/null 2>& 1 +epprd_rg:process_resources(5.513)[logredo_volume_groups:2874] pattern='jfs*log' +epprd_rg:process_resources(5.513)[logredo_volume_groups:2876] : Any device with the type as log should be added +epprd_rg:process_resources(5.513)[logredo_volume_groups:2882] odmget -q $'name = epprdaloglv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.517)[logredo_volume_groups:2882] [[ -n $'\nCuAt:\n\tname = "epprdaloglv"\n\tattribute = "type"\n\tvalue = "jfs2log"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.517)[logredo_volume_groups:2884] logdevs=' /dev/epprdaloglv' +epprd_rg:process_resources(5.517)[logredo_volume_groups:2882] odmget -q $'name = saplv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.521)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.521)[logredo_volume_groups:2882] odmget -q $'name = sapmntlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.524)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.524)[logredo_volume_groups:2882] odmget -q $'name = oraclelv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.528)[logredo_volume_groups:2882] [[ -n 
'' ]] +epprd_rg:process_resources(5.528)[logredo_volume_groups:2882] odmget -q $'name = epplv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.531)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.531)[logredo_volume_groups:2882] odmget -q $'name = oraarchlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.535)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.535)[logredo_volume_groups:2882] odmget -q $'name = sapdata1lv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.539)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.539)[logredo_volume_groups:2882] odmget -q $'name = sapdata2lv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.542)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.542)[logredo_volume_groups:2882] odmget -q $'name = sapdata3lv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.546)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.546)[logredo_volume_groups:2882] odmget -q $'name = sapdata4lv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.549)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.550)[logredo_volume_groups:2882] odmget -q $'name = boardlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.553)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.553)[logredo_volume_groups:2882] odmget -q $'name = origlogAlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.557)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.557)[logredo_volume_groups:2882] odmget -q $'name = origlogBlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.561)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.561)[logredo_volume_groups:2882] odmget -q $'name = mirrlogAlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.564)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.564)[logredo_volume_groups:2882] odmget -q $'name = mirrlogBlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.568)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.568)[logredo_volume_groups:2889] : JFS2 file systems can have inline logs where the log LV is the same as the FS LV. 
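For every JFS2 LV, the stanza lookup that follows finds the "log =" attribute of the matching /etc/filesystems paragraph; a value of INLINE, or the FS device itself, marks an inline log, in which case logredo must run against the FS device rather than a separate log LV. A sketch of the lookup, with /dev/saplv as a placeholder (grep -p, paragraph mode, is an AIX extension used throughout this trace):

    #!/bin/ksh
    dev=/dev/saplv              # placeholder JFS2 file system device
    # Print the /etc/filesystems stanza for the device and keep the
    # value of its "log = ..." line, as the trace does.
    log=$(grep -wp $dev /etc/filesystems | awk '$1 ~ /log/ {printf $3}')
    if [[ $log == INLINE || $log == "$dev" ]]
    then
        print "$dev uses an inline log: logredo runs on $dev itself"
    else
        print "$dev logs to $log"
    fi
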
+epprd_rg:process_resources(5.568)[logredo_volume_groups:2895] odmget $'-qname = epprdaloglv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.571)[logredo_volume_groups:2895] [[ -n '' ]] +epprd_rg:process_resources(5.571)[logredo_volume_groups:2895] odmget $'-qname = saplv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.575)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "saplv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.577)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.577)[logredo_volume_groups:2898] odmget -q 'name = saplv and attribute = label' CuAt +epprd_rg:process_resources(5.581)[logredo_volume_groups:2898] [[ -n /usr/sap ]] +epprd_rg:process_resources(5.583)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.583)[logredo_volume_groups:2900] grep -wp /dev/saplv /etc/filesystems +epprd_rg:process_resources(5.588)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.588)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.588)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/saplv ]] +epprd_rg:process_resources(5.588)[logredo_volume_groups:2895] odmget $'-qname = sapmntlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.592)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapmntlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.594)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.594)[logredo_volume_groups:2898] odmget -q 'name = sapmntlv and attribute = label' CuAt +epprd_rg:process_resources(5.598)[logredo_volume_groups:2898] [[ -n /sapmnt ]] +epprd_rg:process_resources(5.600)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.600)[logredo_volume_groups:2900] grep -wp /dev/sapmntlv /etc/filesystems +epprd_rg:process_resources(5.606)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.606)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.606)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapmntlv ]] +epprd_rg:process_resources(5.606)[logredo_volume_groups:2895] odmget $'-qname = oraclelv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.609)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "oraclelv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.611)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.611)[logredo_volume_groups:2898] odmget -q 'name = oraclelv and attribute = label' CuAt +epprd_rg:process_resources(5.615)[logredo_volume_groups:2898] [[ -n /oracle ]] +epprd_rg:process_resources(5.618)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.618)[logredo_volume_groups:2900] grep -wp /dev/oraclelv /etc/filesystems +epprd_rg:process_resources(5.623)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.623)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] 
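External log devices were collected first, by asking the ODM for LVs whose CuAt type matches jfs*log; only epprdaloglv qualifies here (type jfs2log), so logdevs is seeded with ' /dev/epprdaloglv'. The same classification can be done with clodmget, whose -q/-f/-n options appear throughout this trace; a sketch with a placeholder LV:

    #!/bin/ksh
    lv=epprdaloglv              # placeholder LV name
    t=$(clodmget -q "name = $lv and attribute = type" -f value -n CuAt)
    case $t in
    jfs*log) print "/dev/$lv is a JFS or JFS2 log device" ;;
    jfs2)    print "/dev/$lv holds a JFS2 file system" ;;
    *)       print "/dev/$lv: type '${t:-unknown}'" ;;
    esac
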
+epprd_rg:process_resources(5.623)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/oraclelv ]] +epprd_rg:process_resources(5.623)[logredo_volume_groups:2895] odmget $'-qname = epplv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.626)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "epplv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.628)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.628)[logredo_volume_groups:2898] odmget -q 'name = epplv and attribute = label' CuAt +epprd_rg:process_resources(5.633)[logredo_volume_groups:2898] [[ -n /oracle/EPP ]] +epprd_rg:process_resources(5.635)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.635)[logredo_volume_groups:2900] grep -wp /dev/epplv /etc/filesystems +epprd_rg:process_resources(5.640)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.640)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.640)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/epplv ]] +epprd_rg:process_resources(5.640)[logredo_volume_groups:2895] odmget $'-qname = oraarchlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.644)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "oraarchlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.646)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.646)[logredo_volume_groups:2898] odmget -q 'name = oraarchlv and attribute = label' CuAt +epprd_rg:process_resources(5.650)[logredo_volume_groups:2898] [[ -n /oracle/EPP/oraarch ]] +epprd_rg:process_resources(5.652)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.652)[logredo_volume_groups:2900] grep -wp /dev/oraarchlv /etc/filesystems +epprd_rg:process_resources(5.657)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.657)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.657)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/oraarchlv ]] +epprd_rg:process_resources(5.657)[logredo_volume_groups:2895] odmget $'-qname = sapdata1lv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.661)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapdata1lv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.663)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.663)[logredo_volume_groups:2898] odmget -q 'name = sapdata1lv and attribute = label' CuAt +epprd_rg:process_resources(5.667)[logredo_volume_groups:2898] [[ -n /oracle/EPP/sapdata1 ]] +epprd_rg:process_resources(5.669)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.669)[logredo_volume_groups:2900] grep -wp /dev/sapdata1lv /etc/filesystems +epprd_rg:process_resources(5.674)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.674)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.674)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapdata1lv ]] 
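Every file system in this VG logs to the same external device, so each stanza lookup in this stretch returns /dev/epprdaloglv; before logredo runs, the accumulated list must carry each device only once. A sketch of a simple accumulate-and-dedupe over the repeated value from the trace:

    #!/bin/ksh
    logdevs=""
    for log in /dev/epprdaloglv /dev/epprdaloglv /dev/epprdaloglv
    do
        # Append only if this device is not already in the list.
        [[ " $logdevs " != *" $log "* ]] && logdevs="$logdevs $log"
    done
    print "unique log devices:$logdevs"
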
+epprd_rg:process_resources(5.674)[logredo_volume_groups:2895] odmget $'-qname = sapdata2lv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.678)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapdata2lv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.680)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.680)[logredo_volume_groups:2898] odmget -q 'name = sapdata2lv and attribute = label' CuAt +epprd_rg:process_resources(5.684)[logredo_volume_groups:2898] [[ -n /oracle/EPP/sapdata2 ]] +epprd_rg:process_resources(5.686)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.686)[logredo_volume_groups:2900] grep -wp /dev/sapdata2lv /etc/filesystems +epprd_rg:process_resources(5.691)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.691)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.691)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapdata2lv ]] +epprd_rg:process_resources(5.692)[logredo_volume_groups:2895] odmget $'-qname = sapdata3lv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.695)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapdata3lv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.697)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.698)[logredo_volume_groups:2898] odmget -q 'name = sapdata3lv and attribute = label' CuAt +epprd_rg:process_resources(5.702)[logredo_volume_groups:2898] [[ -n /oracle/EPP/sapdata3 ]] +epprd_rg:process_resources(5.704)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.704)[logredo_volume_groups:2900] grep -wp /dev/sapdata3lv /etc/filesystems +epprd_rg:process_resources(5.709)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.709)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.709)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapdata3lv ]] +epprd_rg:process_resources(5.709)[logredo_volume_groups:2895] odmget $'-qname = sapdata4lv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.713)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapdata4lv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.715)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.715)[logredo_volume_groups:2898] odmget -q 'name = sapdata4lv and attribute = label' CuAt +epprd_rg:process_resources(5.719)[logredo_volume_groups:2898] [[ -n /oracle/EPP/sapdata4 ]] +epprd_rg:process_resources(5.721)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.721)[logredo_volume_groups:2900] grep -wp /dev/sapdata4lv /etc/filesystems +epprd_rg:process_resources(5.726)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.726)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.726)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapdata4lv ]] +epprd_rg:process_resources(5.726)[logredo_volume_groups:2895] odmget $'-qname = boardlv and 
\t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.730)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "boardlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.732)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.732)[logredo_volume_groups:2898] odmget -q 'name = boardlv and attribute = label' CuAt +epprd_rg:process_resources(5.736)[logredo_volume_groups:2898] [[ -n /board_org ]] +epprd_rg:process_resources(5.738)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.738)[logredo_volume_groups:2900] grep -wp /dev/boardlv /etc/filesystems +epprd_rg:process_resources(5.744)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.744)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.744)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/boardlv ]] +epprd_rg:process_resources(5.744)[logredo_volume_groups:2895] odmget $'-qname = origlogAlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.747)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "origlogAlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.749)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.749)[logredo_volume_groups:2898] odmget -q 'name = origlogAlv and attribute = label' CuAt +epprd_rg:process_resources(5.753)[logredo_volume_groups:2898] [[ -n /oracle/EPP/origlogA ]] +epprd_rg:process_resources(5.756)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.756)[logredo_volume_groups:2900] grep -wp /dev/origlogAlv /etc/filesystems +epprd_rg:process_resources(5.761)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.761)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.761)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/origlogAlv ]] +epprd_rg:process_resources(5.761)[logredo_volume_groups:2895] odmget $'-qname = origlogBlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.764)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "origlogBlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.767)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.767)[logredo_volume_groups:2898] odmget -q 'name = origlogBlv and attribute = label' CuAt +epprd_rg:process_resources(5.771)[logredo_volume_groups:2898] [[ -n /oracle/EPP/origlogB ]] +epprd_rg:process_resources(5.773)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.773)[logredo_volume_groups:2900] grep -wp /dev/origlogBlv /etc/filesystems +epprd_rg:process_resources(5.778)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.778)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.778)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/origlogBlv ]] +epprd_rg:process_resources(5.778)[logredo_volume_groups:2895] odmget $'-qname = mirrlogAlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.782)[logredo_volume_groups:2895] [[ 
-n $'\nCuAt:\n\tname = "mirrlogAlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.784)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.784)[logredo_volume_groups:2898] odmget -q 'name = mirrlogAlv and attribute = label' CuAt +epprd_rg:process_resources(5.788)[logredo_volume_groups:2898] [[ -n /oracle/EPP/mirrlogA ]] +epprd_rg:process_resources(5.790)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.790)[logredo_volume_groups:2900] grep -wp /dev/mirrlogAlv /etc/filesystems +epprd_rg:process_resources(5.795)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.795)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.795)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/mirrlogAlv ]] +epprd_rg:process_resources(5.795)[logredo_volume_groups:2895] odmget $'-qname = mirrlogBlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.799)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "mirrlogBlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.801)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.801)[logredo_volume_groups:2898] odmget -q 'name = mirrlogBlv and attribute = label' CuAt +epprd_rg:process_resources(5.805)[logredo_volume_groups:2898] [[ -n /oracle/EPP/mirrlogB ]] +epprd_rg:process_resources(5.807)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.807)[logredo_volume_groups:2900] grep -wp /dev/mirrlogBlv /etc/filesystems +epprd_rg:process_resources(5.812)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.813)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.813)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/mirrlogBlv ]] +epprd_rg:process_resources(5.813)[logredo_volume_groups:2910] : Remove any duplicates acquired so far +epprd_rg:process_resources(5.815)[logredo_volume_groups:2912] echo /dev/epprdaloglv +epprd_rg:process_resources(5.815)[logredo_volume_groups:2912] sort -u +epprd_rg:process_resources(5.815)[logredo_volume_groups:2912] tr ' ' '\n' +epprd_rg:process_resources(5.822)[logredo_volume_groups:2912] logdevs=/dev/epprdaloglv +epprd_rg:process_resources(5.822)[logredo_volume_groups:2915] : Run logredos in parallel to save time. +epprd_rg:process_resources(5.822)[logredo_volume_groups:2919] [[ -n '' ]] +epprd_rg:process_resources(5.822)[logredo_volume_groups:2944] : Run logredo only if the LV is closed. +epprd_rg:process_resources(5.822)[logredo_volume_groups:2946] awk '$1 ~ /^epprdaloglv$/ && $6 ~ /closed\// {print "CLOSED"}' /var/hacmp/log/.process_resources_logredo.26542458 +epprd_rg:process_resources(5.826)[logredo_volume_groups:2946] [[ -n CLOSED ]] +epprd_rg:process_resources(5.826)[logredo_volume_groups:2949] : Run logredo only if filesystem is not mounted on any of the node in the cluster. 
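The loop traced above repeats one pattern for every logical volume in the resource group: confirm through the ODM that the LV carries a jfs2 filesystem, pull its mount point from the label attribute, then extract the "log =" field of its /etc/filesystems stanza with AIX's paragraph grep. Condensed into a standalone ksh sketch (the LV list is illustrative; the trace never takes the INLINE branch, so its handling is omitted):

# Sketch: collect the jfs2 log device for each LV, as logredo_volume_groups does.
# Assumes AIX: odmget(1), grep -p paragraph mode, "log =" lines in /etc/filesystems.
logdevs=""
for lv in saplv sapmntlv oraclelv epplv        # illustrative LV list
do
    # Is this LV a jfs2 logical volume according to the ODM?
    [[ -n $(odmget -q "name = $lv and attribute = type and value = jfs2" CuAt) ]] || continue
    # The LV's mount point is recorded in its label attribute.
    label=$(odmget -q "name = $lv and attribute = label" CuAt |
            sed -n '/value =/s/^.*"\(.*\)".*/\1/p')
    [[ -n $label ]] || continue
    # The filesystem's log device is the "log =" field of its stanza.
    LOG=$(grep -wp /dev/$lv /etc/filesystems | awk '$1 ~ /log/ {printf $3}')
    # Skip inline logs and the LV's own device (that branch is not shown in the trace).
    if [[ $LOG != INLINE && $LOG != /dev/$lv ]]
    then
        logdevs="$logdevs $LOG"
    fi
done
# Drop duplicates: many filesystems usually share one jfslog.
logdevs=$(echo $logdevs | tr ' ' '\n' | sort -u)

Because all fourteen filesystems here share /dev/epprdaloglv, the sort -u collapses the list to a single log device to replay.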
+epprd_rg:process_resources(5.826)[logredo_volume_groups:2951] [[ -z '' ]] +epprd_rg:process_resources(5.827)[logredo_volume_groups:2958] rm -f /var/hacmp/log/.process_resources_logredo.26542458 +epprd_rg:process_resources(5.827)[logredo_volume_groups:2953] logredo /dev/epprdaloglv +epprd_rg:process_resources(5.831)[logredo_volume_groups:2962] : Wait for the background logredos from the RGs +epprd_rg:process_resources(5.832)[logredo_volume_groups:2964] wait J2_LOGREDO:log redo processing for /dev/epprdaloglv +epprd_rg:process_resources(5.839)[logredo_volume_groups:2966] return 0 +epprd_rg:process_resources(5.839)[3324] true +epprd_rg:process_resources(5.839)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(5.839)[3328] set -a +epprd_rg:process_resources(5.839)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:50:43.996056 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(5.858)[3329] eval JOB_TYPE=FILESYSTEMS ACTION=ACQUIRE FILE_SYSTEMS='"/board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap"' RESOURCE_GROUPS='"epprd_rg' '"' FSCHECK_TOOLS='"fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck"' RECOVERY_METHODS='"sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential"' +epprd_rg:process_resources(5.858)[1] JOB_TYPE=FILESYSTEMS +epprd_rg:process_resources(5.858)[1] ACTION=ACQUIRE +epprd_rg:process_resources(5.858)[1] FILE_SYSTEMS=/board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap +epprd_rg:process_resources(5.858)[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources(5.858)[1] FSCHECK_TOOLS=fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck +epprd_rg:process_resources(5.858)[1] RECOVERY_METHODS=sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:process_resources(5.858)[3330] RC=0 +epprd_rg:process_resources(5.858)[3331] set +a +epprd_rg:process_resources(5.858)[3333] (( 0 != 0 )) +epprd_rg:process_resources(5.858)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(5.858)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(5.858)[3343] export GROUPNAME +epprd_rg:process_resources(5.858)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(5.858)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(5.858)[3360] [[ FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources(5.858)[3360] [[ FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources(5.858)[3482] process_file_systems ACQUIRE +epprd_rg:process_resources(5.858)[process_file_systems:2640] PS4_FUNC=process_file_systems +epprd_rg:process_resources(5.858)[process_file_systems:2640] typeset PS4_FUNC +epprd_rg:process_resources(5.858)[process_file_systems:2641] [[ high == high ]] +epprd_rg:process_resources(5.858)[process_file_systems:2641] set -x 
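Stepping back one level: process_resources is driven by clRGPA, which on each call prints shell assignments describing the next unit of work (JOB_TYPE=FILESYSTEMS, ACTION=ACQUIRE, the FILE_SYSTEMS list, and so on), and the script evals that output under set -a so every assignment is exported to the helpers it dispatches. A minimal sketch of the pattern, runnable only on a PowerHA node where clRGPA is on the PATH; the stub worker and the loop's exit condition are assumptions, not from this trace:

# Sketch of the clRGPA dispatch loop at the top of process_resources.
process_file_systems() { print "would $1 file systems: $FILE_SYSTEMS"; }  # stand-in worker

while true
do
    set -a                        # auto-export everything the eval assigns
    eval $(clRGPA)                # e.g. JOB_TYPE=FILESYSTEMS ACTION=ACQUIRE ...
    RC=$?
    set +a
    (( RC != 0 )) && exit 1       # sketch: the real script logs before exiting
    case $JOB_TYPE in
        NONE)        break ;;     # assumed terminator; not reached in this excerpt
        FILESYSTEMS) process_file_systems $ACTION ;;
        *)           : ;;         # the many other JOB_TYPEs are elided here
    esac
done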
+epprd_rg:process_resources(5.858)[process_file_systems:2643] STAT=0 +epprd_rg:process_resources(5.858)[process_file_systems:2645] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources(5.858)[process_file_systems:2647] cl_activate_fs +epprd_rg:cl_activate_fs[819] version=1.1.8.5 +epprd_rg:cl_activate_fs[823] : Check for mounting OEM file systems +epprd_rg:cl_activate_fs[825] OEM_FS=false +epprd_rg:cl_activate_fs[826] (( 0 != 0 )) +epprd_rg:cl_activate_fs[832] STATUS=0 +epprd_rg:cl_activate_fs[832] typeset -li STATUS +epprd_rg:cl_activate_fs[833] EMULATE=REAL +epprd_rg:cl_activate_fs[836] : The environment variable MOUNT_WLMCNTRL_SELFMANAGE is referred inside mount. +epprd_rg:cl_activate_fs[837] : If this variable is set, few calls to wlmcntrl are skipped inside mount, which +epprd_rg:cl_activate_fs[838] : offers performance benefits. Hence we will export this variable if it is set +epprd_rg:cl_activate_fs[839] : in /etc/environment. +epprd_rg:cl_activate_fs[841] grep -w ^MOUNT_WLMCNTRL_SELFMANAGE /etc/environment +epprd_rg:cl_activate_fs[841] export eval +epprd_rg:cl_activate_fs[843] [[ -n FILESYSTEMS ]] +epprd_rg:cl_activate_fs[843] [[ FILESYSTEMS != GROUP ]] +epprd_rg:cl_activate_fs[846] : If JOB_TYPE is set, and it does not equal to GROUP, then +epprd_rg:cl_activate_fs[847] : we are processing for process_resources, which passes requests +epprd_rg:cl_activate_fs[848] : associaed with multiple resource groups through environment variables +epprd_rg:cl_activate_fs[850] activate_fs_process_resources +epprd_rg:cl_activate_fs[activate_fs_process_resources:716] [[ high == high ]] +epprd_rg:cl_activate_fs[activate_fs_process_resources:716] set -x +epprd_rg:cl_activate_fs[activate_fs_process_resources:718] ERRSTATUS=0 +epprd_rg:cl_activate_fs[activate_fs_process_resources:718] typeset -i ERRSTATUS +epprd_rg:cl_activate_fs[activate_fs_process_resources:719] RC=0 +epprd_rg:cl_activate_fs[activate_fs_process_resources:719] typeset -li RC +epprd_rg:cl_activate_fs[activate_fs_process_resources:742] export GROUPNAME +epprd_rg:cl_activate_fs[activate_fs_process_resources:745] : Get the file systems, recovery tool and procedure for this +epprd_rg:cl_activate_fs[activate_fs_process_resources:746] : resource group +epprd_rg:cl_activate_fs[activate_fs_process_resources:748] print /board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap +epprd_rg:cl_activate_fs[activate_fs_process_resources:748] read _RG_FILE_SYSTEMS FILE_SYSTEMS +epprd_rg:cl_activate_fs[activate_fs_process_resources:748] IFS=: +epprd_rg:cl_activate_fs[activate_fs_process_resources:749] print fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck +epprd_rg:cl_activate_fs[activate_fs_process_resources:749] read _RG_FSCHECK_TOOLS FSCHECK_TOOLS +epprd_rg:cl_activate_fs[activate_fs_process_resources:749] IFS=: +epprd_rg:cl_activate_fs[activate_fs_process_resources:750] print sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:cl_activate_fs[activate_fs_process_resources:750] read _RG_RECOVERY_METHODS RECOVERY_METHODS +epprd_rg:cl_activate_fs[activate_fs_process_resources:750] IFS=: +epprd_rg:cl_activate_fs[activate_fs_process_resources:753] : Since all file systems in a resource group use the same recovery 
+epprd_rg:cl_activate_fs[activate_fs_process_resources:754] : method and recovery means, just pick up the first one in the list +epprd_rg:cl_activate_fs[activate_fs_process_resources:756] print fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck +epprd_rg:cl_activate_fs[activate_fs_process_resources:756] read FSCHECK_TOOL rest +epprd_rg:cl_activate_fs[activate_fs_process_resources:756] IFS=, +epprd_rg:cl_activate_fs[activate_fs_process_resources:757] print sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:cl_activate_fs[activate_fs_process_resources:757] read RECOVERY_METHOD rest +epprd_rg:cl_activate_fs[activate_fs_process_resources:757] IFS=, +epprd_rg:cl_activate_fs[activate_fs_process_resources:760] : If there are any unmounted file systems for this resource group, go +epprd_rg:cl_activate_fs[activate_fs_process_resources:761] : recover and mount them. +epprd_rg:cl_activate_fs[activate_fs_process_resources:763] [[ -n /board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap ]] +epprd_rg:cl_activate_fs[activate_fs_process_resources:765] IFS=, +epprd_rg:cl_activate_fs[activate_fs_process_resources:765] set -- /board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap +epprd_rg:cl_activate_fs[activate_fs_process_resources:765] print /board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap +epprd_rg:cl_activate_fs[activate_fs_process_resources:765] RG_FILE_SYSTEMS='/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[activate_fs_process_resources:766] activate_fs_process_group sequential fsck '/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[activate_fs_process_group:362] PS4_LOOP='' +epprd_rg:cl_activate_fs[activate_fs_process_group:362] typeset PS4_LOOP +epprd_rg:cl_activate_fs[activate_fs_process_group:363] [[ high == high ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:363] set -x +epprd_rg:cl_activate_fs[activate_fs_process_group:365] typeset RECOVERY_METHOD FSCHECK_TOOL FILESYSTEMS +epprd_rg:cl_activate_fs[activate_fs_process_group:366] STATUS=0 +epprd_rg:cl_activate_fs[activate_fs_process_group:366] typeset -i STATUS +epprd_rg:cl_activate_fs[activate_fs_process_group:368] RECOVERY_METHOD=sequential +epprd_rg:cl_activate_fs[activate_fs_process_group:369] FSCHECK_TOOL=fsck +epprd_rg:cl_activate_fs[activate_fs_process_group:370] shift 2 +epprd_rg:cl_activate_fs[activate_fs_process_group:371] FILESYSTEMS='/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch 
/oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[activate_fs_process_group:372] comm_failure='' +epprd_rg:cl_activate_fs[activate_fs_process_group:372] typeset comm_failure +epprd_rg:cl_activate_fs[activate_fs_process_group:373] rc_mount='' +epprd_rg:cl_activate_fs[activate_fs_process_group:373] typeset rc_mount +epprd_rg:cl_activate_fs[activate_fs_process_group:376] : Filter out duplicates, and file systems which are already mounted +epprd_rg:cl_activate_fs[activate_fs_process_group:378] mounts_to_do '/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[mounts_to_do:283] tomount='/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[mounts_to_do:283] typeset tomount +epprd_rg:cl_activate_fs[mounts_to_do:286] : Get most current list of mounted filesystems +epprd_rg:cl_activate_fs[mounts_to_do:288] mount +epprd_rg:cl_activate_fs[mounts_to_do:288] 2> /dev/null +epprd_rg:cl_activate_fs[mounts_to_do:288] paste -s - +epprd_rg:cl_activate_fs[mounts_to_do:288] awk '$3 ~ /jfs2*$/ {print $2}' +epprd_rg:cl_activate_fs[mounts_to_do:288] mounted=$'/\t/usr\t/var\t/tmp\t/home\t/admin\t/opt\t/var/adm/ras/livedump\t/ptf' +epprd_rg:cl_activate_fs[mounts_to_do:288] typeset mounted +epprd_rg:cl_activate_fs[mounts_to_do:291] shift +epprd_rg:cl_activate_fs[mounts_to_do:294] typeset -A mountedArray tomountArray +epprd_rg:cl_activate_fs[mounts_to_do:295] typeset fs +epprd_rg:cl_activate_fs[mounts_to_do:298] : Create an associative array for each list, which +epprd_rg:cl_activate_fs[mounts_to_do:299] : has the side effect of dropping any duplicates +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/usr]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/var]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/tmp]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/home]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/admin]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/opt]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/var/adm/ras/livedump]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/ptf]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/board_org]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/mirrlogA]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/mirrlogB]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/oraarch]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/origlogA]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/origlogB]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/sapdata1]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/sapdata2]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/sapdata3]=1 
+epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/sapdata4]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/sapmnt]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/usr/sap]=1 +epprd_rg:cl_activate_fs[mounts_to_do:310] mounted='' +epprd_rg:cl_activate_fs[mounts_to_do:311] tomount='' +epprd_rg:cl_activate_fs[mounts_to_do:314] : expand fs from all tomountArray subscript names +epprd_rg:cl_activate_fs[mounts_to_do:316] set +u +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:329] : Print all subscript names which are all remaining mount +epprd_rg:cl_activate_fs[mounts_to_do:330] : points which have to be mounted +epprd_rg:cl_activate_fs[mounts_to_do:332] print /board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap +epprd_rg:cl_activate_fs[mounts_to_do:332] sort -u +epprd_rg:cl_activate_fs[mounts_to_do:332] tr ' ' '\n' +epprd_rg:cl_activate_fs[mounts_to_do:334] set -u +epprd_rg:cl_activate_fs[activate_fs_process_group:378] FILESYSTEMS=$'/board_org\n/oracle\n/oracle/EPP\n/oracle/EPP/mirrlogA\n/oracle/EPP/mirrlogB\n/oracle/EPP/oraarch\n/oracle/EPP/origlogA\n/oracle/EPP/origlogB\n/oracle/EPP/sapdata1\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata4\n/sapmnt\n/usr/sap' +epprd_rg:cl_activate_fs[activate_fs_process_group:379] [[ -z $'/board_org\n/oracle\n/oracle/EPP\n/oracle/EPP/mirrlogA\n/oracle/EPP/mirrlogB\n/oracle/EPP/oraarch\n/oracle/EPP/origlogA\n/oracle/EPP/origlogB\n/oracle/EPP/sapdata1\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata4\n/sapmnt\n/usr/sap' ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:385] : Get unique temporary file names by using the resource group and the +epprd_rg:cl_activate_fs[activate_fs_process_group:386] : current process ID +epprd_rg:cl_activate_fs[activate_fs_process_group:388] [[ -z epprd_rg ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:397] TMP_FILENAME=epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs[activate_fs_process_group:398] rm -f /tmp/epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs[activate_fs_process_group:401] : If FSCHECK_TOOL is null get from ODM +epprd_rg:cl_activate_fs[activate_fs_process_group:403] [[ -z fsck ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:408] print fsck +epprd_rg:cl_activate_fs[activate_fs_process_group:408] FSCHECK_TOOL=fsck +epprd_rg:cl_activate_fs[activate_fs_process_group:409] [[ fsck != fsck ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:416] : If RECOVERY_METHOD is null get from ODM +epprd_rg:cl_activate_fs[activate_fs_process_group:418] [[ -z sequential ]] 
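mounts_to_do, traced above, is a set difference written in ksh93: load the currently mounted jfs2 mount points and the requested mount points into two associative arrays (subscript assignment silently drops duplicates), delete every request that is already mounted, and print the survivors one per line. A minimal standalone sketch of the same technique (the request list is illustrative):

# Sketch: mounts_to_do as a ksh93 set difference.
tomount="/board_org /oracle /sapmnt /usr/sap"      # illustrative request list
typeset -A mountedArray tomountArray

# Mounted jfs2 mount points; under ksh the last pipeline stage runs in this
# shell, so the array assignments persist.
mount 2>/dev/null | awk '$3 ~ /jfs2*$/ {print $2}' | while read fs rest
do
    mountedArray[$fs]=1
done

for fs in $tomount                    # subscript assignment drops duplicates
do
    tomountArray[$fs]=1
done

set +u                                # unset members must test as empty strings
for fs in "${!tomountArray[@]}"
do
    [[ ${mountedArray[$fs]} == 1 ]] && unset "tomountArray[$fs]"
done
set -u

# Whatever subscripts remain still need to be mounted, one per line.
print -- "${!tomountArray[@]}" | tr ' ' '\n' | sort -u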
+epprd_rg:cl_activate_fs[activate_fs_process_group:423] print sequential +epprd_rg:cl_activate_fs[activate_fs_process_group:423] RECOVERY_METHOD=sequential +epprd_rg:cl_activate_fs[activate_fs_process_group:424] [[ sequential != sequential ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:431] set -u +epprd_rg:cl_activate_fs[activate_fs_process_group:434] : If FSCHECK_TOOL is set to logredo, the logredo for each jfslog has +epprd_rg:cl_activate_fs[activate_fs_process_group:435] : already been done in get_disk_vg_fs, so we only need to do fsck check +epprd_rg:cl_activate_fs[activate_fs_process_group:436] : and recovery here before going on to do the mounts +epprd_rg:cl_activate_fs[activate_fs_process_group:438] [[ fsck == fsck ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:441] TOOL='/usr/sbin/fsck -f -p -o nologredo' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:445] PS4_LOOP=/board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:446] lsfs /board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:446] grep -w /board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:449] : Verify if any of the file system /board_org is already mounted anywhere +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] lsfs -qc /board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
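One detail worth flagging in the trace above: after the lsfs -qc output is split on colons, cut takes the field following MountGuard as 'yes)' with the closing parenthesis attached, so the later [[ 'yes)' == yes ]] test is false and the MountGuard branch is skipped even though the attribute is plainly set. That reads as a parsing artifact rather than an unprotected filesystem. A more defensive extraction would strip the punctuation, for example (assuming the value itself never contains parentheses):

# Sketch: pull the MountGuard value out of lsfs -qc without the stray ')'.
MOUNTGUARD=$(LC_ALL=C lsfs -qc /board_org | tr : '\n' |
             grep -w MountGuard | cut -d' ' -f2 | tr -d ')')
[[ $MOUNTGUARD == yes ]] && print "MountGuard protection is active"

The same 'yes)' value recurs for every filesystem below, so none of them take the MountGuard path in this run.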
+epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:463] fsdb /board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/board_org\n\nFile System Size:\t\t10485032\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t16384\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000009ffd28\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t91\n[10] s_agsize:\t\t0x00004000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0013ffa5\n \t\t s_fsckpxd.address:\t1310629\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'boardl\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5832\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000000b5\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t181\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc37\t[52] last unmounted:\t0x63d4fdf5\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/board_org\n\nFile System Size:\t\t10485032\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t16384\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000009ffd28\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t91\n[10] s_agsize:\t\t0x00004000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0013ffa5\n \t\t s_fsckpxd.address:\t1310629\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'boardl\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5832\t[33] 
s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000000b5\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t181\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc37\t[52] last unmounted:\t0x63d4fdf5\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/boardlv The current volume is: /dev/boardlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:445] PS4_LOOP=/oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:446] lsfs /oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:446] grep -w /oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:449] : Verify if any of the file system /oracle is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] lsfs -qc /oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
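With /board_org now fully processed, the per-filesystem gate is visible end to end: read the device from lsfs, dump the superblock with fsdb to see whether FM_MOUNT is set (meaning some node holds the filesystem cleanly mounted, so mounting it here too would risk corruption), and only then run a full-verification fsck with log replay disabled, since logredo_volume_groups already replayed the jfslog. The same gate reduces to roughly this sketch (error handling elided; the temp-file name is illustrative):

# Sketch: the gate one filesystem passes in activate_fs_process_group.
fs=/board_org                              # illustrative; the trace loops over 14
lsfs $fs | grep -w $fs | read DEV rest     # first lsfs column is the device

# Dump the superblock non-interactively: "su" shows it, "q" quits fsdb.
fsdb $fs 0<< \EOF > /tmp/fsdb.out.$$
su
q
EOF

# FM_MOUNT in the dump means a clean mount exists somewhere; bail out.
if grep -w FM_MOUNT /tmp/fsdb.out.$$ > /dev/null
then
    print "ERROR: $fs appears mounted elsewhere in the cluster"
else
    # Full check, preen mode, no log replay: logredo already handled the jfslog.
    /usr/sbin/fsck -f -p -o nologredo $DEV
fi
rm -f /tmp/fsdb.out.$$

In this run every dump shows FM_CLEAN instead, so FMMOUNT stays empty and each filesystem proceeds to its fsck.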
+epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:463] fsdb /oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle\n\nFile System Size:\t\t41941352\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t65536\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000027ff968\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t211\n[10] s_agsize:\t\t0x00010000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x004fff2d\n \t\t s_fsckpxd.address:\t5242669\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'oracle\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5819\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000295\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t661\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc37\t[52] last unmounted:\t0x63d4fdf4\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle\n\nFile System Size:\t\t41941352\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t65536\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000027ff968\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t211\n[10] s_agsize:\t\t0x00010000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x004fff2d\n \t\t s_fsckpxd.address:\t5242669\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'oracle\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5819\t[33] s_time.tj_nsec:\t0x00000000\n[14] 
s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000295\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t661\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc37\t[52] last unmounted:\t0x63d4fdf4\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/oraclelv The current volume is: /dev/oraclelv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:446] lsfs /oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:446] grep -w /oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] lsfs -qc /oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:463] fsdb /oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP\n\nFile System Size:\t\t62912232\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t65536\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x0000000003bff6e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t291\n[10] s_agsize:\t\t0x00010000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0077fedd\n \t\t s_fsckpxd.address:\t7864029\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'epplv\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5824\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000003d5\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t981\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc37\t[52] last unmounted:\t0x63d4fdf4\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP\n\nFile System Size:\t\t62912232\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t65536\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x0000000003bff6e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t291\n[10] s_agsize:\t\t0x00010000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0077fedd\n \t\t s_fsckpxd.address:\t7864029\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] 
s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'epplv\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5824\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000003d5\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t981\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc37\t[52] last unmounted:\t0x63d4fdf4\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/epplv The current volume is: /dev/epplv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:446] lsfs /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:446] grep -w /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/mirrlogA is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
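The comment pair above recurs for every filesystem because MountGuard is the mechanism PowerHA leans on to prevent a concurrent second mount across nodes. It is an ordinary per-filesystem JFS2 attribute; on AIX levels that support it, it can be toggled and inspected like this (a sketch; consult the AIX documentation for your release):

# Sketch: MountGuard is a per-filesystem JFS2 attribute.
chfs -a mountguard=yes /board_org     # protect against concurrent mounting
lsfs -q /board_org                    # the characteristics line reports MountGuard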
+epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] lsfs -qc /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:463] fsdb /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/mirrlogA\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'mirrlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5834\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc37\t[52] last unmounted:\t0x63d4fdf4\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/mirrlogA\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate 
blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'mirrlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5834\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc37\t[52] last unmounted:\t0x63d4fdf4\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/mirrlogAlv The current volume is: /dev/mirrlogAlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:446] lsfs /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:446] grep -w /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/mirrlogB is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:450] : else in the cluster. 
If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] lsfs -qc /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:463] fsdb /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/mirrlogB\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'mirrlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5835\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc38\t[52] last unmounted:\t0x63d4fdf4\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] 
unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/mirrlogB\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'mirrlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5835\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc38\t[52] last unmounted:\t0x63d4fdf4\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/mirrlogBlv The current volume is: /dev/mirrlogBlv Primary superblock is valid. 
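A note on the MountGuard branch traced above: the pipeline at activate_fs_process_group line 457 captures MOUNTGUARD='yes)' rather than 'yes' — the closing parenthesis of the lsfs -qc attribute list rides along with the value — so the test [[ 'yes)' == yes ]] at line 469 evaluates false for every filesystem in this log, and processing falls through to the fsdb and fsck checks instead. Below is a minimal reworked parse (hypothetical, not the shipped cl_activate_fs code) that keeps the trace's own pipeline but drops the stray parenthesis:

# Same extraction as line 457, plus a final 'tr -d' to strip the trailing ')'
# so the comparison against the literal string "yes" can succeed.
MOUNTGUARD=$(LC_ALL=C lsfs -qc /oracle/EPP/mirrlogB | tr : '\n' | grep -w MountGuard | cut -d' ' -f2 | tr -d ')')
[[ $MOUNTGUARD == yes ]] && print 'MountGuard is enabled on this filesystem'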
+epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:446] lsfs /oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:446] grep -w /oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/oraarch is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] lsfs -qc /oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
+epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:463] fsdb /oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/oraarch\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'oraarc\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d582e\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc38\t[52] last unmounted:\t0x63d4fdf4\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/oraarch\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'oraarc\'\n FM_CLEAN \t[32] 
s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d582e\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc38\t[52] last unmounted:\t0x63d4fdf4\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/oraarchlv The current volume is: /dev/oraarchlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:446] lsfs /oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:446] grep -w /oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/origlogA is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
+epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] lsfs -qc /oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:463] fsdb /oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/origlogA\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'origlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5832\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc38\t[52] last unmounted:\t0x63d4fdf3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/origlogA\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate 
blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'origlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5832\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc38\t[52] last unmounted:\t0x63d4fdf3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/origlogAlv The current volume is: /dev/origlogAlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:446] lsfs /oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:446] grep -w /oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/origlogB is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:450] : else in the cluster. 
If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] lsfs -qc /oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:463] fsdb /oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/origlogB\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'origlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5833\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc38\t[52] last unmounted:\t0x63d4fdf3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] 
unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/origlogB\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'origlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5833\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc38\t[52] last unmounted:\t0x63d4fdf3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/origlogBlv The current volume is: /dev/origlogBlv Primary superblock is valid. 
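For readers unfamiliar with the fsdb step repeated above: the script feeds fsdb two subcommands on stdin — su to display the superblock and q to quit — then scans the dump for the FM_MOUNT state flag, which JFS2 sets while the filesystem is cleanly mounted on some node. Every superblock in this trace reports FM_CLEAN under s_state, so FMMOUNT comes back empty and activation continues. A self-contained sketch of the probe, mirroring lines 463-469 of activate_fs_process_group with /oracle/EPP/origlogB as the example mount point:

# 'su' dumps the JFS2 superblock; 'q' exits fsdb.
FMMOUNT_OUT=$(fsdb /oracle/EPP/origlogB 0<< \EOF
su
q
EOF
)
# A non-empty result means the filesystem looks mounted on another node,
# in which case continuing would risk data corruption.
FMMOUNT=$(echo "$FMMOUNT_OUT" | grep -w FM_MOUNT | awk '{ print $1 }')
[[ -n $FMMOUNT ]] && print -u2 'WARNING: filesystem appears mounted elsewhere'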
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:446] lsfs /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:446] grep -w /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/sapdata1 is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] lsfs -qc /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:463] fsdb /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/sapdata1\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d582f\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc38\t[52] last unmounted:\t0x63d4fdf3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/sapdata1\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] 
s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d582f\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc38\t[52] last unmounted:\t0x63d4fdf3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapdata1lv The current volume is: /dev/sapdata1lv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:446] lsfs /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:446] grep -w /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/sapdata2 is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] lsfs -qc /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:463] fsdb /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/sapdata2\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5830\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc38\t[52] last unmounted:\t0x63d4fdf3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/sapdata2\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate 
blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5830\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc38\t[52] last unmounted:\t0x63d4fdf3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapdata2lv The current volume is: /dev/sapdata2lv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:446] lsfs /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:446] grep -w /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/sapdata3 is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:450] : else in the cluster. 
If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] lsfs -qc /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:463] fsdb /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/sapdata3\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5831\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc38\t[52] last unmounted:\t0x63d4fdf3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] 
unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/sapdata3\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5831\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc38\t[52] last unmounted:\t0x63d4fdf3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapdata3lv The current volume is: /dev/sapdata3lv Primary superblock is valid. 
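The last step for each filesystem, traced at lines 503-508 above: because this resource group processes its filesystems sequentially (the [[ sequential == parallel ]] test is false), each logical volume is checked in the foreground before mounting. Per the AIX fsck documentation, -f requests a fast check, -p fixes minor problems without prompting, and -o nologredo is the JFS2-specific option that skips replaying the transaction log. A sketch of the equivalent loop over devices seen so far in this trace (the remaining filesystems follow the same pattern below):

# Sequential check of the JFS2 logical volumes named in this trace.
for lv in /dev/sapdata1lv /dev/sapdata2lv /dev/sapdata3lv
do
    # fast, non-interactive check; do not replay the JFS2 log
    /usr/sbin/fsck -f -p -o nologredo $lv || print -u2 "fsck reported problems on $lv"
done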
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:446] lsfs /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:446] grep -w /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/sapdata4 is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] lsfs -qc /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:463] fsdb /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/sapdata4\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5831\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc38\t[52] last unmounted:\t0x63d4fdf3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/sapdata4\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] 
s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5831\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc38\t[52] last unmounted:\t0x63d4fdf3\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapdata4lv The current volume is: /dev/sapdata4lv Primary superblock is valid. +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:445] PS4_LOOP=/sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:446] lsfs /sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:446] grep -w /sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:449] : Verify if any of the file system /sapmnt is already mounted anywhere +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] lsfs -qc /sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:463] fsdb /sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/sapmnt\n\nFile System Size:\t\t20970472\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t32768\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000013ffbe8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t131\n[10] s_agsize:\t\t0x00008000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0027ff7d\n \t\t s_fsckpxd.address:\t2621309\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapmnt\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5818\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000155\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t341\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc39\t[52] last unmounted:\t0x63d4fdf2\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/sapmnt\n\nFile System Size:\t\t20970472\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t32768\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000013ffbe8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t131\n[10] s_agsize:\t\t0x00008000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0027ff7d\n \t\t s_fsckpxd.address:\t2621309\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] 
s_fpack:\t\t\'sapmnt\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5818\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000155\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t341\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc39\t[52] last unmounted:\t0x63d4fdf2\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapmntlv The current volume is: /dev/sapmntlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:445] PS4_LOOP=/usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:446] lsfs /usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:446] grep -w /usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:449] : Verify if any of the file system /usr/sap is already mounted anywhere +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] lsfs -qc /usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:463] fsdb /usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/usr/sap\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'saplv\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5815\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc39\t[52] last unmounted:\t0x63d4fdf2\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/usr/sap\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000005\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] 
s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'saplv\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5815\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fc39\t[52] last unmounted:\t0x63d4fdf2\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/saplv The current volume is: /dev/saplv Primary superblock is valid. 
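At this point every JFS2 filesystem in the group has passed the same pre-mount screen: read the MountGuard attribute with lsfs -q, dump the superblock with fsdb to see whether FM_MOUNT marks the filesystem as mounted on some node, and run a forced fsck that skips jfs2 log replay. A condensed ksh sketch of that screen follows; FS and DEV are placeholders, and the branching is simplified because script lines 469 through 508 are only partially visible in the trace.

    # Pre-mount screen, one filesystem at a time (sketch; FS/DEV are placeholders).
    FS=/oracle/EPP/sapdata4
    DEV=/dev/sapdata4lv

    # MountGuard: split lsfs -q output on ':' and take the word after the
    # MountGuard key. Note the trace captured 'yes)': a trailing parenthesis
    # comes along from the lsfs -q formatting.
    MOUNTGUARD=$(LC_ALL=C lsfs -qc $FS | tr : '\n' | grep -w MountGuard | cut '-d ' -f2)

    # FM_MOUNT: feed fsdb the "su" (show superblock) and "q" subcommands; the
    # flag appears in the s_state area only while a node holds the mount.
    FMMOUNT_OUT=$(printf 'su\nq\n' | fsdb $FS)
    FMMOUNT=$(echo "$FMMOUNT_OUT" | grep -w FM_MOUNT | awk '{ print $1 }')

    if [[ -n $FMMOUNT ]]
    then
        # Recorded as mounted elsewhere in the cluster: stop here rather
        # than risk a concurrent mount and data corruption.
        exit 1
    fi

    # Clean so far: full check without log replay before this node mounts it.
    /usr/sbin/fsck -f -p -o nologredo $DEV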
+epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:513] : Allow any backgrounded fsck operations to finish +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:515] wait +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:519] : Now attempt to mount all the file systems +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:521] ALLFS=All_filesystems +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:522] cl_RMupdate resource_acquiring All_filesystems cl_activate_fs 2023-01-28T19:50:44.797219 2023-01-28T19:50:44.801750 +epprd_rg:cl_activate_fs(0.794):/usr/sap[activate_fs_process_group:524] PS4_TIMER=true +epprd_rg:cl_activate_fs(0.794):/usr/sap[activate_fs_process_group:524] typeset PS4_TIMER +epprd_rg:cl_activate_fs(0.794):/board_org[activate_fs_process_group:527] PS4_LOOP=/board_org +epprd_rg:cl_activate_fs(0.794):/board_org[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(0.794):/board_org[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(0.794):/board_org[activate_fs_process_group:540] fs_mount /board_org fsck epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:69] FS=/board_org +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:81] : Here check to see if the information in /etc/filesystems for /board_org +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:84] : point in /etc/filesystems. 
+epprd_rg:cl_activate_fs(0.795):/board_org[fs_mount:86] lsfs -c /board_org +epprd_rg:cl_activate_fs(0.796):/board_org[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(0.801):/board_org[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(0.796):/board_org[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(0.801):/board_org[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(0.801):/board_org[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(0.802):/board_org[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(0.796):/board_org[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(0.802):/board_org[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(0.803):/board_org[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(0.804):/board_org[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(0.804):/board_org[fs_mount:100] LV_name=boardlv +epprd_rg:cl_activate_fs(0.805):/board_org[fs_mount:101] getlvcb -T -A boardlv +epprd_rg:cl_activate_fs(0.805):/board_org[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(0.823):/board_org[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(0.806):/board_org[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.11 \n\t lvname = boardlv \n\t label = /board_org \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:47 2022\n \t time modified = Sat Jan 28 17:10:40 2023\n ' +epprd_rg:cl_activate_fs(0.823):/board_org[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(0.823):/board_org[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(0.824):/board_org[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(0.806):/board_org[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.11 \n\t lvname = boardlv \n\t label = /board_org \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:47 2022\n \t time modified = Sat Jan 28 17:10:40 2023\n ' +epprd_rg:cl_activate_fs(0.825):/board_org[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(0.826):/board_org[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(0.827):/board_org[fs_mount:115] clodmget -q 'name = boardlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(0.831):/board_org[fs_mount:115] CuAt_label=/board_org +epprd_rg:cl_activate_fs(0.831):/board_org[fs_mount:118] : At this point, if things are working correctly, /board_org from /etc/filesystems +epprd_rg:cl_activate_fs(0.831):/board_org[fs_mount:119] : should match /board_org from CuAt ODM and /board_org from the LVCB +epprd_rg:cl_activate_fs(0.831):/board_org[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(0.831):/board_org[fs_mount:121] : were done in clvaryonvg. 
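The fs_mount preamble above gathers the same mount point from three places before comparing them. Isolated, the check looks like this: a sketch using the /board_org names from the trace, with placeholder mismatch handling, since this log never takes that branch.

    FS=/board_org

    # 1. /etc/filesystems, via lsfs: last line, colon-separated, field 2 is
    #    the device; strip /dev/ to get the LV name.
    LC_ALL=C lsfs -c $FS 2>&1 | tail -1 | IFS=: read skip LV_dev_name rest
    LV_name=${LV_dev_name##*/}        # boardlv

    # 2. The on-disk LVCB, via getlvcb. ksh runs the last pipeline stage in
    #    the current shell, so read really does set LVCB_label here.
    getlvcb -T -A $LV_name 2>&1 | grep -w 'label =' | read skip skip LVCB_label

    # 3. The CuAt ODM entry for the LV's label attribute.
    CuAt_label=$(clodmget -q "name = $LV_name and attribute = label" -f value -n CuAt)

    # All three should name the mount point. No repair happens here, since
    # clvaryonvg already made its best-effort fixes.
    if [[ $LVCB_label != $FS || $CuAt_label != $FS ]]
    then
        print -u2 "WARNING: label mismatch for $LV_name"    # placeholder action
    fi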
+epprd_rg:cl_activate_fs(0.831):/board_org[fs_mount:123] [[ /board_org != /board_org ]] +epprd_rg:cl_activate_fs(0.831):/board_org[fs_mount:128] [[ /board_org != /board_org ]] +epprd_rg:cl_activate_fs(0.831):/board_org[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(0.831):/board_org[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(0.831):/board_org[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(0.851):/board_org[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(0.851):/board_org[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(0.851):/board_org[fs_mount:160] amlog_trace '' 'Activating Filesystem|/board_org' +epprd_rg:cl_activate_fs(0.851):/board_org[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(0.852):/board_org[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(0.876):/board_org[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(0.879):/board_org[amlog_trace:319] DATE=2023-01-28T19:50:44.887016 +epprd_rg:cl_activate_fs(0.879):/board_org[amlog_trace:320] echo '|2023-01-28T19:50:44.887016|INFO: Activating Filesystem|/board_org' +epprd_rg:cl_activate_fs(0.879):/board_org[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(0.879):/board_org[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(0.882):/board_org[fs_mount:162] : Try to mount filesystem /board_org at Jan 28 19:50:44.000 +epprd_rg:cl_activate_fs(0.882):/board_org[fs_mount:163] mount /board_org +epprd_rg:cl_activate_fs(0.894):/board_org[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(0.894):/board_org[fs_mount:219] : On successful mount of a JFS2 file system, 
engage mountguard, +epprd_rg:cl_activate_fs(0.894):/board_org[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(0.894):/board_org[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/board_org' +epprd_rg:cl_activate_fs(0.894):/board_org[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(0.895):/board_org[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(0.920):/board_org[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(0.922):/board_org[amlog_trace:319] DATE=2023-01-28T19:50:44.930161 +epprd_rg:cl_activate_fs(0.922):/board_org[amlog_trace:320] echo '|2023-01-28T19:50:44.930161|INFO: Activating Filesystems completed|/board_org' +epprd_rg:cl_activate_fs(0.922):/board_org[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(0.922):/board_org[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(0.922):/board_org[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(0.922):/board_org[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(0.922):/board_org[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(0.923):/board_org[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(0.923):/board_org[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(0.923):/board_org[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(0.923):/board_org[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(0.923):/board_org[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(0.923):/board_org[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(0.924):/board_org[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(0.924):/board_org[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(0.926):/board_org[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(0.926):/board_org[fs_mount:236] IFS=. 
+epprd_rg:cl_activate_fs(0.926):/board_org[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(0.926):/board_org[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(0.926):/board_org[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(0.926):/board_org[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(0.926):/board_org[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(0.926):/board_org[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(0.806):/board_org[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.11 \n\t lvname = boardlv \n\t label = /board_org \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:47 2022\n \t time modified = Sat Jan 28 17:10:40 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(0.926):/board_org[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(0.927):/oracle[activate_fs_process_group:527] PS4_LOOP=/oracle +epprd_rg:cl_activate_fs(0.927):/oracle[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(0.927):/oracle[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(0.927):/oracle[activate_fs_process_group:540] fs_mount /oracle fsck epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:69] FS=/oracle +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:84] : point in /etc/filesystems. 
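The version gate just traced for /board_org (fs_mount:230 through 241) leans on a ksh zero-padding idiom that is easy to misread in raw trace output, so here it is isolated before the same sequence repeats for /oracle. The thresholds are the two comparisons visible above; nothing else is assumed.

    # Pad R to 2 digits and M/F to 3 so V.R.M.F collapses into one integer
    # that compares reliably: 7.2.5.102 becomes 702005102.
    typeset -li V R M F VRMF
    typeset -Z2 R
    typeset -Z3 M F

    lslpp -lcqOr bos.rte.filesystem | cut -f3 -d: | IFS=. read V R M F
    VRMF=${V}${R}${M}${F}

    # MountGuard support needs bos.rte.filesystem 6.1.7.0 on an AIX 6 kernel
    # or 7.1.1.0 on AIX 7, per the two tests in the trace.
    if (( (V == 6 && VRMF >= 601007000) || (V == 7 && VRMF >= 701001000) ))
    then
        : # this level can use mountguard
    fi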
+epprd_rg:cl_activate_fs(0.927):/oracle[fs_mount:86] lsfs -c /oracle +epprd_rg:cl_activate_fs(0.928):/oracle[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(0.933):/oracle[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(0.928):/oracle[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle:/dev/oraclelv:jfs2:::41943040:rw:no:no' +epprd_rg:cl_activate_fs(0.933):/oracle[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(0.933):/oracle[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(0.934):/oracle[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(0.928):/oracle[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle:/dev/oraclelv:jfs2:::41943040:rw:no:no' +epprd_rg:cl_activate_fs(0.935):/oracle[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(0.936):/oracle[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(0.936):/oracle[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(0.937):/oracle[fs_mount:100] LV_name=oraclelv +epprd_rg:cl_activate_fs(0.937):/oracle[fs_mount:101] getlvcb -T -A oraclelv +epprd_rg:cl_activate_fs(0.938):/oracle[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(0.956):/oracle[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(0.938):/oracle[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.4 \n\t lvname = oraclelv \n\t label = /oracle \n\t machine id = 44AF14B00 \n\t number lps = 40 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:42 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(0.956):/oracle[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(0.956):/oracle[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(0.958):/oracle[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(0.938):/oracle[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.4 \n\t lvname = oraclelv \n\t label = /oracle \n\t machine id = 44AF14B00 \n\t number lps = 40 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:42 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(0.958):/oracle[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(0.959):/oracle[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(0.960):/oracle[fs_mount:115] clodmget -q 'name = oraclelv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(0.964):/oracle[fs_mount:115] CuAt_label=/oracle +epprd_rg:cl_activate_fs(0.964):/oracle[fs_mount:118] : At this point, if things are working correctly, /oracle from /etc/filesystems +epprd_rg:cl_activate_fs(0.964):/oracle[fs_mount:119] : should match /oracle from CuAt ODM and /oracle from the LVCB +epprd_rg:cl_activate_fs(0.964):/oracle[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(0.964):/oracle[fs_mount:121] : were done in clvaryonvg. 
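One more detail worth isolating before the /oracle pass repeats it: at fs_mount:244 through 255 the script engages MountGuard only when the LVCB fs string does not already carry mountguard=yes, because setting it rewrites the VG timestamp. Every filesystem in this log already has it set, so the trace always takes the early return; the chfs call below is therefore an assumption about the branch this log never reaches.

    # LVCB_info is the getlvcb output captured earlier; skip when the guard
    # is already recorded (the case throughout this log).
    if [[ $LVCB_info != *mountguard=yes* ]]
    then
        # Assumed recovery path, never exercised in this trace: mark the
        # JFS2 filesystem so a second node cannot mount it concurrently.
        chfs -a mountguard=yes $FS
    fi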
+epprd_rg:cl_activate_fs(0.964):/oracle[fs_mount:123] [[ /oracle != /oracle ]] +epprd_rg:cl_activate_fs(0.964):/oracle[fs_mount:128] [[ /oracle != /oracle ]] +epprd_rg:cl_activate_fs(0.964):/oracle[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(0.964):/oracle[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(0.964):/oracle[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(0.984):/oracle[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(0.984):/oracle[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(0.984):/oracle[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle' +epprd_rg:cl_activate_fs(0.984):/oracle[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(0.985):/oracle[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.010):/oracle[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.012):/oracle[amlog_trace:319] DATE=2023-01-28T19:50:45.020198 +epprd_rg:cl_activate_fs(1.012):/oracle[amlog_trace:320] echo '|2023-01-28T19:50:45.020198|INFO: Activating Filesystem|/oracle' +epprd_rg:cl_activate_fs(1.012):/oracle[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.012):/oracle[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.015):/oracle[fs_mount:162] : Try to mount filesystem /oracle at Jan 28 19:50:45.000 +epprd_rg:cl_activate_fs(1.015):/oracle[fs_mount:163] mount /oracle +epprd_rg:cl_activate_fs(1.027):/oracle[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.027):/oracle[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.027):/oracle[fs_mount:220] : if we are 
running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.027):/oracle[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle' +epprd_rg:cl_activate_fs(1.027):/oracle[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.028):/oracle[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.052):/oracle[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.055):/oracle[amlog_trace:319] DATE=2023-01-28T19:50:45.062686 +epprd_rg:cl_activate_fs(1.055):/oracle[amlog_trace:320] echo '|2023-01-28T19:50:45.062686|INFO: Activating Filesystems completed|/oracle' +epprd_rg:cl_activate_fs(1.055):/oracle[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.055):/oracle[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.055):/oracle[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.055):/oracle[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.055):/oracle[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.055):/oracle[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.055):/oracle[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.055):/oracle[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.055):/oracle[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.055):/oracle[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.055):/oracle[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.056):/oracle[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.057):/oracle[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.059):/oracle[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.059):/oracle[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(1.059):/oracle[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.059):/oracle[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.059):/oracle[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.059):/oracle[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.059):/oracle[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.059):/oracle[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(0.938):/oracle[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.4 \n\t lvname = oraclelv \n\t label = /oracle \n\t machine id = 44AF14B00 \n\t number lps = 40 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:42 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.059):/oracle[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[activate_fs_process_group:540] fs_mount /oracle/EPP fsck epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:69] FS=/oracle/EPP 
+epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.059):/oracle/EPP[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.060):/oracle/EPP[fs_mount:86] lsfs -c /oracle/EPP +epprd_rg:cl_activate_fs(1.060):/oracle/EPP[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.066):/oracle/EPP[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.061):/oracle/EPP[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP:/dev/epplv:jfs2:::62914560:rw:no:no' +epprd_rg:cl_activate_fs(1.066):/oracle/EPP[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.066):/oracle/EPP[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.067):/oracle/EPP[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.061):/oracle/EPP[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP:/dev/epplv:jfs2:::62914560:rw:no:no' +epprd_rg:cl_activate_fs(1.068):/oracle/EPP[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.068):/oracle/EPP[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.068):/oracle/EPP[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.069):/oracle/EPP[fs_mount:100] LV_name=epplv +epprd_rg:cl_activate_fs(1.070):/oracle/EPP[fs_mount:101] getlvcb -T -A epplv +epprd_rg:cl_activate_fs(1.070):/oracle/EPP[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.088):/oracle/EPP[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.071):/oracle/EPP[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.5 \n\t lvname = epplv \n\t label = /oracle/EPP \n\t machine id = 44AF14B00 \n\t number lps = 60 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(1.088):/oracle/EPP[fs_mount:102] RC=0 
+epprd_rg:cl_activate_fs(1.088):/oracle/EPP[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.089):/oracle/EPP[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.071):/oracle/EPP[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.5 \n\t lvname = epplv \n\t label = /oracle/EPP \n\t machine id = 44AF14B00 \n\t number lps = 60 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(1.090):/oracle/EPP[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.091):/oracle/EPP[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.092):/oracle/EPP[fs_mount:115] clodmget -q 'name = epplv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.095):/oracle/EPP[fs_mount:115] CuAt_label=/oracle/EPP +epprd_rg:cl_activate_fs(1.096):/oracle/EPP[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP from /etc/filesystems +epprd_rg:cl_activate_fs(1.096):/oracle/EPP[fs_mount:119] : should match /oracle/EPP from CuAt ODM and /oracle/EPP from the LVCB +epprd_rg:cl_activate_fs(1.096):/oracle/EPP[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.096):/oracle/EPP[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.096):/oracle/EPP[fs_mount:123] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:cl_activate_fs(1.096):/oracle/EPP[fs_mount:128] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:cl_activate_fs(1.096):/oracle/EPP[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.096):/oracle/EPP[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.096):/oracle/EPP[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.116):/oracle/EPP[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.116):/oracle/EPP[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.116):/oracle/EPP[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP' +epprd_rg:cl_activate_fs(1.116):/oracle/EPP[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.117):/oracle/EPP[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.142):/oracle/EPP[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.144):/oracle/EPP[amlog_trace:319] DATE=2023-01-28T19:50:45.152140 +epprd_rg:cl_activate_fs(1.144):/oracle/EPP[amlog_trace:320] echo '|2023-01-28T19:50:45.152140|INFO: Activating Filesystem|/oracle/EPP' +epprd_rg:cl_activate_fs(1.144):/oracle/EPP[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.144):/oracle/EPP[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.147):/oracle/EPP[fs_mount:162] : Try to mount filesystem /oracle/EPP at Jan 28 19:50:45.000 +epprd_rg:cl_activate_fs(1.147):/oracle/EPP[fs_mount:163] mount /oracle/EPP +epprd_rg:cl_activate_fs(1.159):/oracle/EPP[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.159):/oracle/EPP[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.159):/oracle/EPP[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.159):/oracle/EPP[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP' +epprd_rg:cl_activate_fs(1.159):/oracle/EPP[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.160):/oracle/EPP[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.185):/oracle/EPP[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.187):/oracle/EPP[amlog_trace:319] 
DATE=2023-01-28T19:50:45.195360 +epprd_rg:cl_activate_fs(1.188):/oracle/EPP[amlog_trace:320] echo '|2023-01-28T19:50:45.195360|INFO: Activating Filesystems completed|/oracle/EPP' +epprd_rg:cl_activate_fs(1.188):/oracle/EPP[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.188):/oracle/EPP[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.188):/oracle/EPP[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.188):/oracle/EPP[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.188):/oracle/EPP[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.188):/oracle/EPP[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.188):/oracle/EPP[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.188):/oracle/EPP[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.188):/oracle/EPP[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.188):/oracle/EPP[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.188):/oracle/EPP[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.189):/oracle/EPP[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.189):/oracle/EPP[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.191):/oracle/EPP[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.191):/oracle/EPP[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(1.191):/oracle/EPP[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.191):/oracle/EPP[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.191):/oracle/EPP[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.192):/oracle/EPP[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.192):/oracle/EPP[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.192):/oracle/EPP[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.071):/oracle/EPP[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.5 \n\t lvname = epplv \n\t label = /oracle/EPP \n\t machine id = 44AF14B00 \n\t number lps = 60 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.192):/oracle/EPP[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[activate_fs_process_group:540] fs_mount /oracle/EPP/mirrlogA fsck epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:69] FS=/oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:71] 
TMP_FILENAME=epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.192):/oracle/EPP/mirrlogA[fs_mount:86] lsfs -c /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.193):/oracle/EPP/mirrlogA[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.198):/oracle/EPP/mirrlogA[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.193):/oracle/EPP/mirrlogA[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogA:/dev/mirrlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.198):/oracle/EPP/mirrlogA[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.198):/oracle/EPP/mirrlogA[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.199):/oracle/EPP/mirrlogA[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.193):/oracle/EPP/mirrlogA[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogA:/dev/mirrlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.200):/oracle/EPP/mirrlogA[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.201):/oracle/EPP/mirrlogA[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.201):/oracle/EPP/mirrlogA[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.202):/oracle/EPP/mirrlogA[fs_mount:100] LV_name=mirrlogAlv +epprd_rg:cl_activate_fs(1.202):/oracle/EPP/mirrlogA[fs_mount:101] getlvcb -T -A mirrlogAlv +epprd_rg:cl_activate_fs(1.203):/oracle/EPP/mirrlogA[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.221):/oracle/EPP/mirrlogA[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.203):/oracle/EPP/mirrlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.14 \n\t lvname = mirrlogAlv \n\t label = /oracle/EPP/mirrlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = 
Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(1.221):/oracle/EPP/mirrlogA[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.221):/oracle/EPP/mirrlogA[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.222):/oracle/EPP/mirrlogA[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.203):/oracle/EPP/mirrlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.14 \n\t lvname = mirrlogAlv \n\t label = /oracle/EPP/mirrlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(1.223):/oracle/EPP/mirrlogA[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.224):/oracle/EPP/mirrlogA[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.225):/oracle/EPP/mirrlogA[fs_mount:115] clodmget -q 'name = mirrlogAlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.229):/oracle/EPP/mirrlogA[fs_mount:115] CuAt_label=/oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.229):/oracle/EPP/mirrlogA[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/mirrlogA from /etc/filesystems +epprd_rg:cl_activate_fs(1.229):/oracle/EPP/mirrlogA[fs_mount:119] : should match /oracle/EPP/mirrlogA from CuAt ODM and /oracle/EPP/mirrlogA from the LVCB +epprd_rg:cl_activate_fs(1.229):/oracle/EPP/mirrlogA[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.229):/oracle/EPP/mirrlogA[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.229):/oracle/EPP/mirrlogA[fs_mount:123] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:cl_activate_fs(1.229):/oracle/EPP/mirrlogA[fs_mount:128] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:cl_activate_fs(1.229):/oracle/EPP/mirrlogA[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.229):/oracle/EPP/mirrlogA[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.229):/oracle/EPP/mirrlogA[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.249):/oracle/EPP/mirrlogA[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.249):/oracle/EPP/mirrlogA[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.249):/oracle/EPP/mirrlogA[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_activate_fs(1.249):/oracle/EPP/mirrlogA[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.250):/oracle/EPP/mirrlogA[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.274):/oracle/EPP/mirrlogA[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.277):/oracle/EPP/mirrlogA[amlog_trace:319] DATE=2023-01-28T19:50:45.284954 +epprd_rg:cl_activate_fs(1.277):/oracle/EPP/mirrlogA[amlog_trace:320] echo '|2023-01-28T19:50:45.284954|INFO: Activating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_activate_fs(1.277):/oracle/EPP/mirrlogA[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.277):/oracle/EPP/mirrlogA[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.280):/oracle/EPP/mirrlogA[fs_mount:162] : Try to mount filesystem /oracle/EPP/mirrlogA at Jan 28 19:50:45.000 +epprd_rg:cl_activate_fs(1.280):/oracle/EPP/mirrlogA[fs_mount:163] mount /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.292):/oracle/EPP/mirrlogA[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.292):/oracle/EPP/mirrlogA[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.292):/oracle/EPP/mirrlogA[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.292):/oracle/EPP/mirrlogA[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/mirrlogA' +epprd_rg:cl_activate_fs(1.292):/oracle/EPP/mirrlogA[amlog_trace:318] clcycle clavailability.log 
+epprd_rg:cl_activate_fs(1.293):/oracle/EPP/mirrlogA[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.317):/oracle/EPP/mirrlogA[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.320):/oracle/EPP/mirrlogA[amlog_trace:319] DATE=2023-01-28T19:50:45.328106 +epprd_rg:cl_activate_fs(1.320):/oracle/EPP/mirrlogA[amlog_trace:320] echo '|2023-01-28T19:50:45.328106|INFO: Activating Filesystems completed|/oracle/EPP/mirrlogA' +epprd_rg:cl_activate_fs(1.320):/oracle/EPP/mirrlogA[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.320):/oracle/EPP/mirrlogA[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.320):/oracle/EPP/mirrlogA[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.320):/oracle/EPP/mirrlogA[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.320):/oracle/EPP/mirrlogA[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.320):/oracle/EPP/mirrlogA[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.320):/oracle/EPP/mirrlogA[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.320):/oracle/EPP/mirrlogA[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.320):/oracle/EPP/mirrlogA[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.320):/oracle/EPP/mirrlogA[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.320):/oracle/EPP/mirrlogA[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.321):/oracle/EPP/mirrlogA[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.322):/oracle/EPP/mirrlogA[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.324):/oracle/EPP/mirrlogA[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.324):/oracle/EPP/mirrlogA[fs_mount:236] IFS=. 
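The fragment above captures the script's trick for comparing AIX fileset levels: V, R, M and F are read from lslpp, zero-padded to fixed widths with typeset -Z, and concatenated into one integer so a single arithmetic test covers the whole level. A minimal ksh sketch of the same technique (names follow the trace; the concatenation step is inferred, since the trace only shows its result, 702005102):

    typeset -li V R M F          # integer fields of the VRMF level
    typeset -Z2 R                # zero-fill release to 2 digits
    typeset -Z3 M F              # zero-fill modification and fix to 3 digits
    typeset -li VRMF=0
    lslpp -lcqOr bos.rte.filesystem | cut -f3 -d: | IFS=. read V R M F
    VRMF=$V$R$M$F                # e.g. 7.2.5.102 -> 702005102
    (( V == 7 && VRMF >= 701001000 ))   # true here: level supports mountguard

Because read runs as the last element of a ksh pipeline, the variables land in the current shell, which is why the trace shows them usable immediately afterwards.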
+epprd_rg:cl_activate_fs(1.324):/oracle/EPP/mirrlogA[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.324):/oracle/EPP/mirrlogA[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.324):/oracle/EPP/mirrlogA[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.324):/oracle/EPP/mirrlogA[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.324):/oracle/EPP/mirrlogA[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.324):/oracle/EPP/mirrlogA[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.203):/oracle/EPP/mirrlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.14 \n\t lvname = mirrlogAlv \n\t label = /oracle/EPP/mirrlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.324):/oracle/EPP/mirrlogA[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[activate_fs_process_group:540] fs_mount /oracle/EPP/mirrlogB fsck epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:69] FS=/oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:86] lsfs -c /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.326):/oracle/EPP/mirrlogB[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.331):/oracle/EPP/mirrlogB[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.326):/oracle/EPP/mirrlogB[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogB:/dev/mirrlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.331):/oracle/EPP/mirrlogB[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.331):/oracle/EPP/mirrlogB[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.332):/oracle/EPP/mirrlogB[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.326):/oracle/EPP/mirrlogB[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogB:/dev/mirrlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.332):/oracle/EPP/mirrlogB[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.333):/oracle/EPP/mirrlogB[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.333):/oracle/EPP/mirrlogB[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.335):/oracle/EPP/mirrlogB[fs_mount:100] LV_name=mirrlogBlv +epprd_rg:cl_activate_fs(1.335):/oracle/EPP/mirrlogB[fs_mount:101] getlvcb -T -A mirrlogBlv +epprd_rg:cl_activate_fs(1.336):/oracle/EPP/mirrlogB[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.354):/oracle/EPP/mirrlogB[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.336):/oracle/EPP/mirrlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.15 \n\t lvname = mirrlogBlv \n\t label = /oracle/EPP/mirrlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:50 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.354):/oracle/EPP/mirrlogB[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.354):/oracle/EPP/mirrlogB[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.355):/oracle/EPP/mirrlogB[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.336):/oracle/EPP/mirrlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.15 \n\t lvname = mirrlogBlv \n\t label = /oracle/EPP/mirrlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:50 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.356):/oracle/EPP/mirrlogB[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.357):/oracle/EPP/mirrlogB[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.358):/oracle/EPP/mirrlogB[fs_mount:115] clodmget -q 'name = mirrlogBlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.362):/oracle/EPP/mirrlogB[fs_mount:115] CuAt_label=/oracle/EPP/mirrlogB 
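Before anything is mounted, each pass cross-checks the same mount point in three places, and the tests at fs_mount:123 and fs_mount:128 (just below) require them to agree: the /etc/filesystems stanza via lsfs -c, the on-disk logical volume control block via getlvcb, and the CuAt ODM via clodmget. Condensed into a sketch, assuming the mount point is in $FS and that the LV name is stripped from the lsfs device field (the trace shows only the resulting LV_name):

    lsfs -c "$FS" 2>&1 | tail -1 | IFS=: read skip LV_dev_name vfs_type rest
    LV_name=${LV_dev_name##*/}        # assumed: /dev/mirrlogBlv -> mirrlogBlv
    getlvcb -T -A "$LV_name" 2>&1 | grep -w 'label =' | read skip skip LVCB_label
    CuAt_label=$(clodmget -q "name = $LV_name and attribute = label" -f value -n CuAt)
    [[ $LVCB_label != "$FS" ]] && print "LVCB label does not match $FS"
    [[ $CuAt_label != "$FS" ]] && print "CuAt label does not match $FS"

No repair happens on mismatch at this stage; as the traced comments say, clvaryonvg already made its best recovery effort.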
+epprd_rg:cl_activate_fs(1.362):/oracle/EPP/mirrlogB[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/mirrlogB from /etc/filesystems +epprd_rg:cl_activate_fs(1.362):/oracle/EPP/mirrlogB[fs_mount:119] : should match /oracle/EPP/mirrlogB from CuAt ODM and /oracle/EPP/mirrlogB from the LVCB +epprd_rg:cl_activate_fs(1.362):/oracle/EPP/mirrlogB[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.362):/oracle/EPP/mirrlogB[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.362):/oracle/EPP/mirrlogB[fs_mount:123] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:cl_activate_fs(1.362):/oracle/EPP/mirrlogB[fs_mount:128] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:cl_activate_fs(1.362):/oracle/EPP/mirrlogB[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.362):/oracle/EPP/mirrlogB[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.362):/oracle/EPP/mirrlogB[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.382):/oracle/EPP/mirrlogB[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.383):/oracle/EPP/mirrlogB[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.383):/oracle/EPP/mirrlogB[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_activate_fs(1.383):/oracle/EPP/mirrlogB[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.383):/oracle/EPP/mirrlogB[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.408):/oracle/EPP/mirrlogB[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.411):/oracle/EPP/mirrlogB[amlog_trace:319] 
DATE=2023-01-28T19:50:45.418493 +epprd_rg:cl_activate_fs(1.411):/oracle/EPP/mirrlogB[amlog_trace:320] echo '|2023-01-28T19:50:45.418493|INFO: Activating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_activate_fs(1.411):/oracle/EPP/mirrlogB[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.411):/oracle/EPP/mirrlogB[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/mirrlogB[fs_mount:162] : Try to mount filesystem /oracle/EPP/mirrlogB at Jan 28 19:50:45.000 +epprd_rg:cl_activate_fs(1.413):/oracle/EPP/mirrlogB[fs_mount:163] mount /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.425):/oracle/EPP/mirrlogB[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.425):/oracle/EPP/mirrlogB[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.425):/oracle/EPP/mirrlogB[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.425):/oracle/EPP/mirrlogB[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/mirrlogB' +epprd_rg:cl_activate_fs(1.425):/oracle/EPP/mirrlogB[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.426):/oracle/EPP/mirrlogB[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/mirrlogB[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.453):/oracle/EPP/mirrlogB[amlog_trace:319] DATE=2023-01-28T19:50:45.461038 +epprd_rg:cl_activate_fs(1.453):/oracle/EPP/mirrlogB[amlog_trace:320] echo '|2023-01-28T19:50:45.461038|INFO: Activating Filesystems completed|/oracle/EPP/mirrlogB' +epprd_rg:cl_activate_fs(1.453):/oracle/EPP/mirrlogB[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.453):/oracle/EPP/mirrlogB[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.453):/oracle/EPP/mirrlogB[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.453):/oracle/EPP/mirrlogB[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.453):/oracle/EPP/mirrlogB[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.453):/oracle/EPP/mirrlogB[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.453):/oracle/EPP/mirrlogB[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.453):/oracle/EPP/mirrlogB[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.453):/oracle/EPP/mirrlogB[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.453):/oracle/EPP/mirrlogB[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.453):/oracle/EPP/mirrlogB[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.454):/oracle/EPP/mirrlogB[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.455):/oracle/EPP/mirrlogB[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.457):/oracle/EPP/mirrlogB[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.457):/oracle/EPP/mirrlogB[fs_mount:236] IFS=. 
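Every mount is bracketed by an amlog_trace pair, 'Activating Filesystem' before and 'Activating Filesystems completed' after, so the availability log carries one timestamped, pipe-delimited record per edge. In outline (clcycle rotates the log and cltime supplies the microsecond timestamp, both exactly as logged):

    clcycle clavailability.log > /dev/null 2>&1     # rotate the log if needed
    DATE=$(cltime)                                  # e.g. 2023-01-28T19:50:45.418493
    echo "|$DATE|INFO: Activating Filesystem|$FS" \
        >> /var/hacmp/availability/clavailability.log

Pairing the two records per file system gives the activation latency; here each mount completes within a few hundredths of a second.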
+epprd_rg:cl_activate_fs(1.457):/oracle/EPP/mirrlogB[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.457):/oracle/EPP/mirrlogB[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.457):/oracle/EPP/mirrlogB[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.457):/oracle/EPP/mirrlogB[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.457):/oracle/EPP/mirrlogB[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.457):/oracle/EPP/mirrlogB[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.336):/oracle/EPP/mirrlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.15 \n\t lvname = mirrlogBlv \n\t label = /oracle/EPP/mirrlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:50 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.457):/oracle/EPP/mirrlogB[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.457):/oracle/EPP/oraarch[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.457):/oracle/EPP/oraarch[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.457):/oracle/EPP/oraarch[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.457):/oracle/EPP/oraarch[activate_fs_process_group:540] fs_mount /oracle/EPP/oraarch fsck epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(1.457):/oracle/EPP/oraarch[fs_mount:69] FS=/oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.458):/oracle/EPP/oraarch[fs_mount:86] lsfs -c /oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.459):/oracle/EPP/oraarch[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.464):/oracle/EPP/oraarch[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.459):/oracle/EPP/oraarch[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/oraarch:/dev/oraarchlv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.464):/oracle/EPP/oraarch[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.464):/oracle/EPP/oraarch[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.465):/oracle/EPP/oraarch[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.459):/oracle/EPP/oraarch[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/oraarch:/dev/oraarchlv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.466):/oracle/EPP/oraarch[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.466):/oracle/EPP/oraarch[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.466):/oracle/EPP/oraarch[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.468):/oracle/EPP/oraarch[fs_mount:100] LV_name=oraarchlv +epprd_rg:cl_activate_fs(1.468):/oracle/EPP/oraarch[fs_mount:101] getlvcb -T -A oraarchlv +epprd_rg:cl_activate_fs(1.469):/oracle/EPP/oraarch[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.486):/oracle/EPP/oraarch[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.469):/oracle/EPP/oraarch[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.6 \n\t lvname = oraarchlv \n\t label = /oracle/EPP/oraarch \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.486):/oracle/EPP/oraarch[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.486):/oracle/EPP/oraarch[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.488):/oracle/EPP/oraarch[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.469):/oracle/EPP/oraarch[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.6 \n\t lvname = oraarchlv \n\t label = /oracle/EPP/oraarch \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.488):/oracle/EPP/oraarch[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.489):/oracle/EPP/oraarch[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.491):/oracle/EPP/oraarch[fs_mount:115] clodmget -q 'name = oraarchlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.494):/oracle/EPP/oraarch[fs_mount:115] CuAt_label=/oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.494):/oracle/EPP/oraarch[fs_mount:118] : 
At this point, if things are working correctly, /oracle/EPP/oraarch from /etc/filesystems +epprd_rg:cl_activate_fs(1.494):/oracle/EPP/oraarch[fs_mount:119] : should match /oracle/EPP/oraarch from CuAt ODM and /oracle/EPP/oraarch from the LVCB +epprd_rg:cl_activate_fs(1.494):/oracle/EPP/oraarch[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.494):/oracle/EPP/oraarch[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.494):/oracle/EPP/oraarch[fs_mount:123] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:cl_activate_fs(1.494):/oracle/EPP/oraarch[fs_mount:128] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:cl_activate_fs(1.494):/oracle/EPP/oraarch[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.494):/oracle/EPP/oraarch[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.494):/oracle/EPP/oraarch[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.514):/oracle/EPP/oraarch[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.514):/oracle/EPP/oraarch[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.514):/oracle/EPP/oraarch[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_activate_fs(1.515):/oracle/EPP/oraarch[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.515):/oracle/EPP/oraarch[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.540):/oracle/EPP/oraarch[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/oraarch[amlog_trace:319] DATE=2023-01-28T19:50:45.550431 
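The recurring clwparroot interlude answers one question per file system: does resource group epprd_rg live inside a WPAR? loadWparName asks the HACMPresource ODM class for a WPAR_NAME attribute; the empty answer means clwparroot exits 0 with no output, WPAR_ROOT stays empty, and the mount runs in the global environment. The decision reduces to:

    # sketch of the traced path through loadWparName/clwparroot
    wparName=$(clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource)
    [[ -z $wparName ]] && exit 0    # not WPAR-enabled: caller sees empty WPAR_ROOT
    # otherwise the WPAR root path would be resolved and printed;
    # that branch is never exercised anywhere in this trace

Since the answer cannot change between file systems of one resource group, re-sourcing wpar_utils on every pass is effectively overhead, visible here as roughly 20 ms per file system.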
+epprd_rg:cl_activate_fs(1.543):/oracle/EPP/oraarch[amlog_trace:320] echo '|2023-01-28T19:50:45.550431|INFO: Activating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/oraarch[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.543):/oracle/EPP/oraarch[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.545):/oracle/EPP/oraarch[fs_mount:162] : Try to mount filesystem /oracle/EPP/oraarch at Jan 28 19:50:45.000 +epprd_rg:cl_activate_fs(1.545):/oracle/EPP/oraarch[fs_mount:163] mount /oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.557):/oracle/EPP/oraarch[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.557):/oracle/EPP/oraarch[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.557):/oracle/EPP/oraarch[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.557):/oracle/EPP/oraarch[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/oraarch' +epprd_rg:cl_activate_fs(1.557):/oracle/EPP/oraarch[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.558):/oracle/EPP/oraarch[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.583):/oracle/EPP/oraarch[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.586):/oracle/EPP/oraarch[amlog_trace:319] DATE=2023-01-28T19:50:45.593676 +epprd_rg:cl_activate_fs(1.586):/oracle/EPP/oraarch[amlog_trace:320] echo '|2023-01-28T19:50:45.593676|INFO: Activating Filesystems completed|/oracle/EPP/oraarch' +epprd_rg:cl_activate_fs(1.586):/oracle/EPP/oraarch[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.586):/oracle/EPP/oraarch[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.586):/oracle/EPP/oraarch[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.586):/oracle/EPP/oraarch[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.586):/oracle/EPP/oraarch[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.586):/oracle/EPP/oraarch[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.586):/oracle/EPP/oraarch[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.586):/oracle/EPP/oraarch[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.586):/oracle/EPP/oraarch[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.586):/oracle/EPP/oraarch[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.586):/oracle/EPP/oraarch[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.587):/oracle/EPP/oraarch[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.588):/oracle/EPP/oraarch[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/oraarch[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/oraarch[fs_mount:236] IFS=. 
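The mount itself is deliberately plain: log a timestamp, run mount, and test the result. Every mount in this trace returns 0, so the recovery arm at fs_mount:209 is skipped each time. A sketch, with the recovery condition hedged from context (the trace only shows '(( 0 == 1 ))'):

    date '+%h %d %H:%M:%S.000'          # timestamp the attempt, as in the trace
    mount $MOUNT_ARGS "$FS"             # MOUNT_ARGS is empty throughout this log
    RC=$?
    if (( RC == 1 )); then              # assumption: rc 1 selects recovery
        : here the $TOOL argument, fsck, would presumably drive a repair and retry
    fi

The fsck and epprd_rg_activate_fs.tmp26739036 arguments passed into fs_mount appear to exist only for that untaken path.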
+epprd_rg:cl_activate_fs(1.590):/oracle/EPP/oraarch[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/oraarch[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/oraarch[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/oraarch[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/oraarch[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/oraarch[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.469):/oracle/EPP/oraarch[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.6 \n\t lvname = oraarchlv \n\t label = /oracle/EPP/oraarch \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/oraarch[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/origlogA +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[activate_fs_process_group:540] fs_mount /oracle/EPP/origlogA fsck epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:69] FS=/oracle/EPP/origlogA +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/origlogA +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.590):/oracle/EPP/origlogA[fs_mount:86] lsfs -c /oracle/EPP/origlogA +epprd_rg:cl_activate_fs(1.591):/oracle/EPP/origlogA[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.596):/oracle/EPP/origlogA[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.592):/oracle/EPP/origlogA[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogA:/dev/origlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.596):/oracle/EPP/origlogA[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.596):/oracle/EPP/origlogA[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.597):/oracle/EPP/origlogA[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.592):/oracle/EPP/origlogA[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogA:/dev/origlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.598):/oracle/EPP/origlogA[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.599):/oracle/EPP/origlogA[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.599):/oracle/EPP/origlogA[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.600):/oracle/EPP/origlogA[fs_mount:100] LV_name=origlogAlv +epprd_rg:cl_activate_fs(1.600):/oracle/EPP/origlogA[fs_mount:101] getlvcb -T -A origlogAlv +epprd_rg:cl_activate_fs(1.601):/oracle/EPP/origlogA[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.620):/oracle/EPP/origlogA[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.601):/oracle/EPP/origlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.12 \n\t lvname = origlogAlv \n\t label = /oracle/EPP/origlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:48 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.620):/oracle/EPP/origlogA[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.620):/oracle/EPP/origlogA[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.621):/oracle/EPP/origlogA[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.601):/oracle/EPP/origlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.12 \n\t lvname = origlogAlv \n\t label = /oracle/EPP/origlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:48 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.621):/oracle/EPP/origlogA[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.623):/oracle/EPP/origlogA[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.624):/oracle/EPP/origlogA[fs_mount:115] clodmget -q 'name = origlogAlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.627):/oracle/EPP/origlogA[fs_mount:115] CuAt_label=/oracle/EPP/origlogA 
+epprd_rg:cl_activate_fs(1.627):/oracle/EPP/origlogA[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/origlogA from /etc/filesystems +epprd_rg:cl_activate_fs(1.627):/oracle/EPP/origlogA[fs_mount:119] : should match /oracle/EPP/origlogA from CuAt ODM and /oracle/EPP/origlogA from the LVCB +epprd_rg:cl_activate_fs(1.627):/oracle/EPP/origlogA[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.627):/oracle/EPP/origlogA[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.627):/oracle/EPP/origlogA[fs_mount:123] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:cl_activate_fs(1.627):/oracle/EPP/origlogA[fs_mount:128] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:cl_activate_fs(1.627):/oracle/EPP/origlogA[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.627):/oracle/EPP/origlogA[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.627):/oracle/EPP/origlogA[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.648):/oracle/EPP/origlogA[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.648):/oracle/EPP/origlogA[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.648):/oracle/EPP/origlogA[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_activate_fs(1.648):/oracle/EPP/origlogA[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.649):/oracle/EPP/origlogA[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.673):/oracle/EPP/origlogA[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.676):/oracle/EPP/origlogA[amlog_trace:319] 
DATE=2023-01-28T19:50:45.683525 +epprd_rg:cl_activate_fs(1.676):/oracle/EPP/origlogA[amlog_trace:320] echo '|2023-01-28T19:50:45.683525|INFO: Activating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_activate_fs(1.676):/oracle/EPP/origlogA[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.676):/oracle/EPP/origlogA[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.678):/oracle/EPP/origlogA[fs_mount:162] : Try to mount filesystem /oracle/EPP/origlogA at Jan 28 19:50:45.000 +epprd_rg:cl_activate_fs(1.678):/oracle/EPP/origlogA[fs_mount:163] mount /oracle/EPP/origlogA +epprd_rg:cl_activate_fs(1.690):/oracle/EPP/origlogA[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.690):/oracle/EPP/origlogA[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.690):/oracle/EPP/origlogA[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.690):/oracle/EPP/origlogA[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/origlogA' +epprd_rg:cl_activate_fs(1.690):/oracle/EPP/origlogA[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.691):/oracle/EPP/origlogA[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogA[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogA[amlog_trace:319] DATE=2023-01-28T19:50:45.725897 +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogA[amlog_trace:320] echo '|2023-01-28T19:50:45.725897|INFO: Activating Filesystems completed|/oracle/EPP/origlogA' +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogA[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogA[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogA[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogA[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogA[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogA[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogA[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogA[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogA[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogA[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.718):/oracle/EPP/origlogA[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.720):/oracle/EPP/origlogA[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.721):/oracle/EPP/origlogA[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.722):/oracle/EPP/origlogA[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.722):/oracle/EPP/origlogA[fs_mount:236] IFS=. 
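The [[ ... != *mountguard=yes* ]] test that closes each pass (the one for /oracle/EPP/origlogA follows just below) re-uses the LVCB text fetched at fs_mount:101 and checks its 'fs =' attribute. Mountguard is the JFS2 feature that refuses a second concurrent mount of the same file system, i.e. the double-mount protection the traced comment names; because every LV in this log already carries mountguard=yes, fs_mount simply returns. Sketch:

    if [[ $LVCB_info != *mountguard=yes* ]]; then
        # assumption: enabling it would use the standard JFS2 attribute,
        #   chfs -a mountguard=yes "$FS"
        # and is done only once, since the change bumps the VG timestamp
        :
    fi
    return 0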
+epprd_rg:cl_activate_fs(1.722):/oracle/EPP/origlogA[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.722):/oracle/EPP/origlogA[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.722):/oracle/EPP/origlogA[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.722):/oracle/EPP/origlogA[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.722):/oracle/EPP/origlogA[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.722):/oracle/EPP/origlogA[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.601):/oracle/EPP/origlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.12 \n\t lvname = origlogAlv \n\t label = /oracle/EPP/origlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:48 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogA[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/origlogB +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[activate_fs_process_group:540] fs_mount /oracle/EPP/origlogB fsck epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:69] FS=/oracle/EPP/origlogB +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/origlogB +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:86] lsfs -c /oracle/EPP/origlogB +epprd_rg:cl_activate_fs(1.724):/oracle/EPP/origlogB[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.729):/oracle/EPP/origlogB[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.724):/oracle/EPP/origlogB[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogB:/dev/origlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.729):/oracle/EPP/origlogB[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.729):/oracle/EPP/origlogB[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.730):/oracle/EPP/origlogB[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.724):/oracle/EPP/origlogB[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogB:/dev/origlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.731):/oracle/EPP/origlogB[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.731):/oracle/EPP/origlogB[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.732):/oracle/EPP/origlogB[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.733):/oracle/EPP/origlogB[fs_mount:100] LV_name=origlogBlv +epprd_rg:cl_activate_fs(1.733):/oracle/EPP/origlogB[fs_mount:101] getlvcb -T -A origlogBlv +epprd_rg:cl_activate_fs(1.734):/oracle/EPP/origlogB[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.752):/oracle/EPP/origlogB[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.734):/oracle/EPP/origlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.13 \n\t lvname = origlogBlv \n\t label = /oracle/EPP/origlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.752):/oracle/EPP/origlogB[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.752):/oracle/EPP/origlogB[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.753):/oracle/EPP/origlogB[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.734):/oracle/EPP/origlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.13 \n\t lvname = origlogBlv \n\t label = /oracle/EPP/origlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.754):/oracle/EPP/origlogB[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.755):/oracle/EPP/origlogB[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.757):/oracle/EPP/origlogB[fs_mount:115] clodmget -q 'name = origlogBlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.760):/oracle/EPP/origlogB[fs_mount:115] CuAt_label=/oracle/EPP/origlogB 
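Stepping back, the activate_fs_process_group frame around every pass explains the strictly serial order of these mounts: the recovery method is 'sequential', so the parallel test at line 528 fails and fs_mount is called in the foreground, one file system at a time. Roughly (loop and method variable names are assumptions; only PS4_LOOP and the fs_mount call appear in the trace):

    for fs in $fs_list; do
        PS4_LOOP=$fs                               # stamps every trace line with the FS
        if [[ $RECOVERY_METHOD == parallel ]]; then
            fs_mount "$fs" fsck "$TMP_FILENAME" &  # hypothetical background branch
        else
            : Call fs_mount function in foreground for serial recovery
            fs_mount "$fs" fsck "$TMP_FILENAME"
        fi
    done

PS4_LOOP is what makes each trace line self-identifying: it is embedded in the PS4 prompt, so a prefix like '(1.723):/oracle/EPP/origlogB' already tells you both the elapsed-seconds counter and the file system being processed.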
+epprd_rg:cl_activate_fs(1.760):/oracle/EPP/origlogB[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/origlogB from /etc/filesystems +epprd_rg:cl_activate_fs(1.760):/oracle/EPP/origlogB[fs_mount:119] : should match /oracle/EPP/origlogB from CuAt ODM and /oracle/EPP/origlogB from the LVCB +epprd_rg:cl_activate_fs(1.760):/oracle/EPP/origlogB[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.760):/oracle/EPP/origlogB[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.760):/oracle/EPP/origlogB[fs_mount:123] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:cl_activate_fs(1.760):/oracle/EPP/origlogB[fs_mount:128] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:cl_activate_fs(1.760):/oracle/EPP/origlogB[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.760):/oracle/EPP/origlogB[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.760):/oracle/EPP/origlogB[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/origlogB[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/origlogB[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/origlogB[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_activate_fs(1.780):/oracle/EPP/origlogB[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.781):/oracle/EPP/origlogB[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.806):/oracle/EPP/origlogB[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.808):/oracle/EPP/origlogB[amlog_trace:319] 
DATE=2023-01-28T19:50:45.816300 +epprd_rg:cl_activate_fs(1.808):/oracle/EPP/origlogB[amlog_trace:320] echo '|2023-01-28T19:50:45.816300|INFO: Activating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_activate_fs(1.808):/oracle/EPP/origlogB[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.809):/oracle/EPP/origlogB[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.811):/oracle/EPP/origlogB[fs_mount:162] : Try to mount filesystem /oracle/EPP/origlogB at Jan 28 19:50:45.000 +epprd_rg:cl_activate_fs(1.811):/oracle/EPP/origlogB[fs_mount:163] mount /oracle/EPP/origlogB +epprd_rg:cl_activate_fs(1.823):/oracle/EPP/origlogB[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.823):/oracle/EPP/origlogB[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.823):/oracle/EPP/origlogB[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.823):/oracle/EPP/origlogB[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/origlogB' +epprd_rg:cl_activate_fs(1.823):/oracle/EPP/origlogB[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.824):/oracle/EPP/origlogB[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/origlogB[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.851):/oracle/EPP/origlogB[amlog_trace:319] DATE=2023-01-28T19:50:45.859025 +epprd_rg:cl_activate_fs(1.851):/oracle/EPP/origlogB[amlog_trace:320] echo '|2023-01-28T19:50:45.859025|INFO: Activating Filesystems completed|/oracle/EPP/origlogB' +epprd_rg:cl_activate_fs(1.851):/oracle/EPP/origlogB[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.851):/oracle/EPP/origlogB[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.851):/oracle/EPP/origlogB[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.851):/oracle/EPP/origlogB[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.851):/oracle/EPP/origlogB[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.851):/oracle/EPP/origlogB[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.851):/oracle/EPP/origlogB[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.851):/oracle/EPP/origlogB[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.851):/oracle/EPP/origlogB[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.851):/oracle/EPP/origlogB[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.851):/oracle/EPP/origlogB[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.852):/oracle/EPP/origlogB[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.853):/oracle/EPP/origlogB[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.855):/oracle/EPP/origlogB[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.855):/oracle/EPP/origlogB[fs_mount:236] IFS=. 
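The read just above splits the bos.rte.filesystem level (7.2.5.102 here) into V, R, M and F; the typeset -Z attributes zero-pad the trailing fields so the pieces concatenate into one fixed-width integer. A minimal ksh93 sketch of that idiom, assuming VRMF is built by plain concatenation (the trace shows the padded result, 702005102, but not the assignment itself):

    typeset -li V VRMF   # integer fields
    typeset -Z2 R        # release zero-padded to 2 digits
    typeset -Z3 M F      # modification and fix zero-padded to 3 digits

    # Field 3 of lslpp -lcqOr output is the V.R.M.F string; the last stage of
    # a ksh93 pipeline runs in the current shell, so read sets real variables.
    lslpp -lcqOr bos.rte.filesystem | cut -f3 -d: | IFS=. read V R M F
    VRMF=$V$R$M$F        # e.g. 7.2.5.102 -> 702005102

    # Mountguard needs AIX 6.1 TL7 or 7.1 TL1 and later, hence the two tests
    if (( V == 6 && VRMF >= 601007000 )) || (( V == 7 && VRMF >= 701001000 )); then
        : this level supports mountguard
    fi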
+epprd_rg:cl_activate_fs(1.855):/oracle/EPP/origlogB[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.855):/oracle/EPP/origlogB[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.855):/oracle/EPP/origlogB[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.855):/oracle/EPP/origlogB[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.855):/oracle/EPP/origlogB[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.855):/oracle/EPP/origlogB[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.734):/oracle/EPP/origlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.13 \n\t lvname = origlogBlv \n\t label = /oracle/EPP/origlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.855):/oracle/EPP/origlogB[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.855):/oracle/EPP/sapdata1[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(1.855):/oracle/EPP/sapdata1[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.855):/oracle/EPP/sapdata1[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[activate_fs_process_group:540] fs_mount /oracle/EPP/sapdata1 fsck epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:69] FS=/oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:86] lsfs -c /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(1.857):/oracle/EPP/sapdata1[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.862):/oracle/EPP/sapdata1[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.857):/oracle/EPP/sapdata1[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata1:/dev/sapdata1lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.862):/oracle/EPP/sapdata1[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.862):/oracle/EPP/sapdata1[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.863):/oracle/EPP/sapdata1[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.857):/oracle/EPP/sapdata1[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata1:/dev/sapdata1lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.864):/oracle/EPP/sapdata1[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.864):/oracle/EPP/sapdata1[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.865):/oracle/EPP/sapdata1[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.866):/oracle/EPP/sapdata1[fs_mount:100] LV_name=sapdata1lv +epprd_rg:cl_activate_fs(1.866):/oracle/EPP/sapdata1[fs_mount:101] getlvcb -T -A sapdata1lv +epprd_rg:cl_activate_fs(1.867):/oracle/EPP/sapdata1[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.885):/oracle/EPP/sapdata1[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.867):/oracle/EPP/sapdata1[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.7 \n\t lvname = sapdata1lv \n\t label = /oracle/EPP/sapdata1 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:44 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(1.885):/oracle/EPP/sapdata1[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.885):/oracle/EPP/sapdata1[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.886):/oracle/EPP/sapdata1[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.867):/oracle/EPP/sapdata1[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.7 \n\t lvname = sapdata1lv \n\t label = /oracle/EPP/sapdata1 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:44 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(1.887):/oracle/EPP/sapdata1[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.888):/oracle/EPP/sapdata1[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.889):/oracle/EPP/sapdata1[fs_mount:115] clodmget -q 'name = sapdata1lv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.892):/oracle/EPP/sapdata1[fs_mount:115] CuAt_label=/oracle/EPP/sapdata1 
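The records above show the three sources fs_mount consults for every filesystem before mounting it. Condensed into a standalone ksh sketch, with command names and variables taken directly from the trace (the RC error checks are omitted):

    FS=/oracle/EPP/sapdata1                    # mount point being activated

    # /etc/filesystems view: the last lsfs -c line is MountPoint:Device:Vfs:...
    lsfs -c $FS 2>&1 | tail -1 | IFS=: read skip LV_dev_name vfs_type rest
    LV_name=${LV_dev_name##*/}                 # /dev/sapdata1lv -> sapdata1lv

    # LVCB on disk: the label recorded in the logical volume control block
    getlvcb -T -A $LV_name 2>&1 | grep -w 'label =' | read skip skip LVCB_label

    # CuAt ODM: the label attribute kept for the same logical volume
    CuAt_label=$(clodmget -q "name = $LV_name and attribute = label" -f value -n CuAt)

    # All three should name the same mount point; nothing is repaired here
    # because clvaryonvg already made its best effort at recovery.
    [[ $FS != "$LVCB_label" ]] && echo "LVCB label mismatch for $FS"
    [[ $FS != "$CuAt_label" ]] && echo "CuAt label mismatch for $FS"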
+epprd_rg:cl_activate_fs(1.892):/oracle/EPP/sapdata1[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/sapdata1 from /etc/filesystems +epprd_rg:cl_activate_fs(1.892):/oracle/EPP/sapdata1[fs_mount:119] : should match /oracle/EPP/sapdata1 from CuAt ODM and /oracle/EPP/sapdata1 from the LVCB +epprd_rg:cl_activate_fs(1.892):/oracle/EPP/sapdata1[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.892):/oracle/EPP/sapdata1[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.892):/oracle/EPP/sapdata1[fs_mount:123] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:cl_activate_fs(1.892):/oracle/EPP/sapdata1[fs_mount:128] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:cl_activate_fs(1.892):/oracle/EPP/sapdata1[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.892):/oracle/EPP/sapdata1[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.893):/oracle/EPP/sapdata1[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.913):/oracle/EPP/sapdata1[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.913):/oracle/EPP/sapdata1[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.913):/oracle/EPP/sapdata1[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_activate_fs(1.913):/oracle/EPP/sapdata1[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.914):/oracle/EPP/sapdata1[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.938):/oracle/EPP/sapdata1[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.941):/oracle/EPP/sapdata1[amlog_trace:319] 
DATE=2023-01-28T19:50:45.948454 +epprd_rg:cl_activate_fs(1.941):/oracle/EPP/sapdata1[amlog_trace:320] echo '|2023-01-28T19:50:45.948454|INFO: Activating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_activate_fs(1.941):/oracle/EPP/sapdata1[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.941):/oracle/EPP/sapdata1[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.943):/oracle/EPP/sapdata1[fs_mount:162] : Try to mount filesystem /oracle/EPP/sapdata1 at Jan 28 19:50:45.000 +epprd_rg:cl_activate_fs(1.943):/oracle/EPP/sapdata1[fs_mount:163] mount /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(1.955):/oracle/EPP/sapdata1[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.956):/oracle/EPP/sapdata1[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.956):/oracle/EPP/sapdata1[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.956):/oracle/EPP/sapdata1[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/sapdata1' +epprd_rg:cl_activate_fs(1.956):/oracle/EPP/sapdata1[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.957):/oracle/EPP/sapdata1[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata1[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.983):/oracle/EPP/sapdata1[amlog_trace:319] DATE=2023-01-28T19:50:45.991347 +epprd_rg:cl_activate_fs(1.984):/oracle/EPP/sapdata1[amlog_trace:320] echo '|2023-01-28T19:50:45.991347|INFO: Activating Filesystems completed|/oracle/EPP/sapdata1' +epprd_rg:cl_activate_fs(1.984):/oracle/EPP/sapdata1[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.984):/oracle/EPP/sapdata1[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.984):/oracle/EPP/sapdata1[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.984):/oracle/EPP/sapdata1[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.984):/oracle/EPP/sapdata1[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.984):/oracle/EPP/sapdata1[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.984):/oracle/EPP/sapdata1[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.984):/oracle/EPP/sapdata1[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.984):/oracle/EPP/sapdata1[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.984):/oracle/EPP/sapdata1[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.984):/oracle/EPP/sapdata1[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.985):/oracle/EPP/sapdata1[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.985):/oracle/EPP/sapdata1[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.987):/oracle/EPP/sapdata1[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.987):/oracle/EPP/sapdata1[fs_mount:236] IFS=. 
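Each mount is bracketed by the amlog_trace calls visible above: rotate, timestamp, append. A reconstruction of the function body from the three traced steps, assuming the signature amlog_trace <errcode> <message> (the first argument is always empty in these calls, so only the message is used):

    amlog_trace()
    {
        # rotate the availability log if clcycle decides it is due
        clcycle clavailability.log > /dev/null 2>&1

        # cltime prints an ISO-style timestamp with microseconds
        DATE=$(cltime)

        # append a pipe-delimited record, e.g.
        # |2023-01-28T19:50:45.948454|INFO: Activating Filesystem|/oracle/EPP/sapdata1
        echo "|$DATE|INFO: $2" >> /var/hacmp/availability/clavailability.log
    }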
+epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata1[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata1[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata1[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata1[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata1[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata1[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.867):/oracle/EPP/sapdata1[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.7 \n\t lvname = sapdata1lv \n\t label = /oracle/EPP/sapdata1 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:44 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata1[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[activate_fs_process_group:540] fs_mount /oracle/EPP/sapdata2 fsck epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:69] FS=/oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:86] lsfs -c /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(1.989):/oracle/EPP/sapdata2[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.994):/oracle/EPP/sapdata2[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.989):/oracle/EPP/sapdata2[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata2:/dev/sapdata2lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.994):/oracle/EPP/sapdata2[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.994):/oracle/EPP/sapdata2[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.995):/oracle/EPP/sapdata2[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.989):/oracle/EPP/sapdata2[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata2:/dev/sapdata2lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.996):/oracle/EPP/sapdata2[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.997):/oracle/EPP/sapdata2[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.997):/oracle/EPP/sapdata2[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.998):/oracle/EPP/sapdata2[fs_mount:100] LV_name=sapdata2lv +epprd_rg:cl_activate_fs(1.998):/oracle/EPP/sapdata2[fs_mount:101] getlvcb -T -A sapdata2lv +epprd_rg:cl_activate_fs(1.999):/oracle/EPP/sapdata2[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.019):/oracle/EPP/sapdata2[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.999):/oracle/EPP/sapdata2[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.8 \n\t lvname = sapdata2lv \n\t label = /oracle/EPP/sapdata2 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:45 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(2.019):/oracle/EPP/sapdata2[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.019):/oracle/EPP/sapdata2[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.020):/oracle/EPP/sapdata2[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.999):/oracle/EPP/sapdata2[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.8 \n\t lvname = sapdata2lv \n\t label = /oracle/EPP/sapdata2 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:45 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(2.021):/oracle/EPP/sapdata2[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.022):/oracle/EPP/sapdata2[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(2.023):/oracle/EPP/sapdata2[fs_mount:115] clodmget -q 'name = sapdata2lv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(2.027):/oracle/EPP/sapdata2[fs_mount:115] CuAt_label=/oracle/EPP/sapdata2 
+epprd_rg:cl_activate_fs(2.027):/oracle/EPP/sapdata2[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/sapdata2 from /etc/filesystems +epprd_rg:cl_activate_fs(2.027):/oracle/EPP/sapdata2[fs_mount:119] : should match /oracle/EPP/sapdata2 from CuAt ODM and /oracle/EPP/sapdata2 from the LVCB +epprd_rg:cl_activate_fs(2.027):/oracle/EPP/sapdata2[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.027):/oracle/EPP/sapdata2[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(2.027):/oracle/EPP/sapdata2[fs_mount:123] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:cl_activate_fs(2.027):/oracle/EPP/sapdata2[fs_mount:128] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:cl_activate_fs(2.027):/oracle/EPP/sapdata2[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.027):/oracle/EPP/sapdata2[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.027):/oracle/EPP/sapdata2[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.047):/oracle/EPP/sapdata2[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.047):/oracle/EPP/sapdata2[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.047):/oracle/EPP/sapdata2[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_activate_fs(2.047):/oracle/EPP/sapdata2[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.048):/oracle/EPP/sapdata2[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.072):/oracle/EPP/sapdata2[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.075):/oracle/EPP/sapdata2[amlog_trace:319] 
DATE=2023-01-28T19:50:46.082859 +epprd_rg:cl_activate_fs(2.075):/oracle/EPP/sapdata2[amlog_trace:320] echo '|2023-01-28T19:50:46.082859|INFO: Activating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_activate_fs(2.075):/oracle/EPP/sapdata2[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.075):/oracle/EPP/sapdata2[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.078):/oracle/EPP/sapdata2[fs_mount:162] : Try to mount filesystem /oracle/EPP/sapdata2 at Jan 28 19:50:46.000 +epprd_rg:cl_activate_fs(2.078):/oracle/EPP/sapdata2[fs_mount:163] mount /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(2.089):/oracle/EPP/sapdata2[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.090):/oracle/EPP/sapdata2[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.090):/oracle/EPP/sapdata2[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(2.090):/oracle/EPP/sapdata2[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/sapdata2' +epprd_rg:cl_activate_fs(2.090):/oracle/EPP/sapdata2[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.091):/oracle/EPP/sapdata2[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.115):/oracle/EPP/sapdata2[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata2[amlog_trace:319] DATE=2023-01-28T19:50:46.125429 +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata2[amlog_trace:320] echo '|2023-01-28T19:50:46.125429|INFO: Activating Filesystems completed|/oracle/EPP/sapdata2' +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata2[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata2[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata2[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata2[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata2[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata2[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata2[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata2[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata2[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata2[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata2[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.119):/oracle/EPP/sapdata2[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.119):/oracle/EPP/sapdata2[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.121):/oracle/EPP/sapdata2[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata2[fs_mount:236] IFS=. 
+epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata2[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata2[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata2[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata2[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata2[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata2[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.999):/oracle/EPP/sapdata2[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.8 \n\t lvname = sapdata2lv \n\t label = /oracle/EPP/sapdata2 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:45 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata2[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[activate_fs_process_group:540] fs_mount /oracle/EPP/sapdata3 fsck epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:69] FS=/oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(2.122):/oracle/EPP/sapdata3[fs_mount:86] lsfs -c /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(2.123):/oracle/EPP/sapdata3[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(2.128):/oracle/EPP/sapdata3[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(2.123):/oracle/EPP/sapdata3[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata3:/dev/sapdata3lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.128):/oracle/EPP/sapdata3[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(2.128):/oracle/EPP/sapdata3[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.129):/oracle/EPP/sapdata3[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(2.123):/oracle/EPP/sapdata3[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata3:/dev/sapdata3lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.130):/oracle/EPP/sapdata3[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(2.131):/oracle/EPP/sapdata3[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(2.131):/oracle/EPP/sapdata3[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(2.132):/oracle/EPP/sapdata3[fs_mount:100] LV_name=sapdata3lv +epprd_rg:cl_activate_fs(2.132):/oracle/EPP/sapdata3[fs_mount:101] getlvcb -T -A sapdata3lv +epprd_rg:cl_activate_fs(2.133):/oracle/EPP/sapdata3[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.151):/oracle/EPP/sapdata3[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(2.133):/oracle/EPP/sapdata3[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.9 \n\t lvname = sapdata3lv \n\t label = /oracle/EPP/sapdata3 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(2.151):/oracle/EPP/sapdata3[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.151):/oracle/EPP/sapdata3[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.152):/oracle/EPP/sapdata3[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(2.133):/oracle/EPP/sapdata3[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.9 \n\t lvname = sapdata3lv \n\t label = /oracle/EPP/sapdata3 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(2.153):/oracle/EPP/sapdata3[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.154):/oracle/EPP/sapdata3[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(2.155):/oracle/EPP/sapdata3[fs_mount:115] clodmget -q 'name = sapdata3lv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(2.159):/oracle/EPP/sapdata3[fs_mount:115] CuAt_label=/oracle/EPP/sapdata3 
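The [fs_mount:247] test repeated after every successful mount scans the LVCB fs= string for mountguard=yes and returns when it is already set, since turning it on changes the VG timestamp and should only happen once. The enable branch never runs in this trace; a hedged sketch of what it would do (chfs is the standard AIX way to set the attribute, but the actual command fs_mount would issue is not visible here):

    # LVCB_info holds the getlvcb -T -A output captured earlier; its fs= line
    # reads vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes
    if [[ $LVCB_info != *mountguard=yes* ]]; then
        # hypothetical: guard the filesystem against double mounts across nodes
        chfs -a mountguard=yes $FS
    fi
    return 0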
+epprd_rg:cl_activate_fs(2.159):/oracle/EPP/sapdata3[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/sapdata3 from /etc/filesystems +epprd_rg:cl_activate_fs(2.159):/oracle/EPP/sapdata3[fs_mount:119] : should match /oracle/EPP/sapdata3 from CuAt ODM and /oracle/EPP/sapdata3 from the LVCB +epprd_rg:cl_activate_fs(2.159):/oracle/EPP/sapdata3[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.159):/oracle/EPP/sapdata3[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(2.159):/oracle/EPP/sapdata3[fs_mount:123] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:cl_activate_fs(2.159):/oracle/EPP/sapdata3[fs_mount:128] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:cl_activate_fs(2.159):/oracle/EPP/sapdata3[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.159):/oracle/EPP/sapdata3[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.159):/oracle/EPP/sapdata3[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.179):/oracle/EPP/sapdata3[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.179):/oracle/EPP/sapdata3[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.179):/oracle/EPP/sapdata3[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_activate_fs(2.179):/oracle/EPP/sapdata3[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.180):/oracle/EPP/sapdata3[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.204):/oracle/EPP/sapdata3[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.207):/oracle/EPP/sapdata3[amlog_trace:319] 
DATE=2023-01-28T19:50:46.215103 +epprd_rg:cl_activate_fs(2.207):/oracle/EPP/sapdata3[amlog_trace:320] echo '|2023-01-28T19:50:46.215103|INFO: Activating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_activate_fs(2.207):/oracle/EPP/sapdata3[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.207):/oracle/EPP/sapdata3[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.210):/oracle/EPP/sapdata3[fs_mount:162] : Try to mount filesystem /oracle/EPP/sapdata3 at Jan 28 19:50:46.000 +epprd_rg:cl_activate_fs(2.210):/oracle/EPP/sapdata3[fs_mount:163] mount /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(2.221):/oracle/EPP/sapdata3[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.221):/oracle/EPP/sapdata3[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.221):/oracle/EPP/sapdata3[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(2.222):/oracle/EPP/sapdata3[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/sapdata3' +epprd_rg:cl_activate_fs(2.222):/oracle/EPP/sapdata3[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.223):/oracle/EPP/sapdata3[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.248):/oracle/EPP/sapdata3[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.250):/oracle/EPP/sapdata3[amlog_trace:319] DATE=2023-01-28T19:50:46.258158 +epprd_rg:cl_activate_fs(2.250):/oracle/EPP/sapdata3[amlog_trace:320] echo '|2023-01-28T19:50:46.258158|INFO: Activating Filesystems completed|/oracle/EPP/sapdata3' +epprd_rg:cl_activate_fs(2.250):/oracle/EPP/sapdata3[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.250):/oracle/EPP/sapdata3[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.250):/oracle/EPP/sapdata3[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(2.250):/oracle/EPP/sapdata3[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.250):/oracle/EPP/sapdata3[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.250):/oracle/EPP/sapdata3[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.250):/oracle/EPP/sapdata3[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.250):/oracle/EPP/sapdata3[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata3[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata3[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata3[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.252):/oracle/EPP/sapdata3[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.253):/oracle/EPP/sapdata3[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.254):/oracle/EPP/sapdata3[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.254):/oracle/EPP/sapdata3[fs_mount:236] IFS=. 
+epprd_rg:cl_activate_fs(2.254):/oracle/EPP/sapdata3[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.254):/oracle/EPP/sapdata3[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.254):/oracle/EPP/sapdata3[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.254):/oracle/EPP/sapdata3[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.254):/oracle/EPP/sapdata3[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.254):/oracle/EPP/sapdata3[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(2.133):/oracle/EPP/sapdata3[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.9 \n\t lvname = sapdata3lv \n\t label = /oracle/EPP/sapdata3 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata3[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[activate_fs_process_group:540] fs_mount /oracle/EPP/sapdata4 fsck epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:69] FS=/oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(2.255):/oracle/EPP/sapdata4[fs_mount:86] lsfs -c /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(2.256):/oracle/EPP/sapdata4[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(2.261):/oracle/EPP/sapdata4[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(2.256):/oracle/EPP/sapdata4[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata4:/dev/sapdata4lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.261):/oracle/EPP/sapdata4[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(2.261):/oracle/EPP/sapdata4[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.262):/oracle/EPP/sapdata4[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(2.256):/oracle/EPP/sapdata4[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata4:/dev/sapdata4lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.263):/oracle/EPP/sapdata4[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(2.264):/oracle/EPP/sapdata4[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(2.264):/oracle/EPP/sapdata4[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(2.265):/oracle/EPP/sapdata4[fs_mount:100] LV_name=sapdata4lv +epprd_rg:cl_activate_fs(2.265):/oracle/EPP/sapdata4[fs_mount:101] getlvcb -T -A sapdata4lv +epprd_rg:cl_activate_fs(2.266):/oracle/EPP/sapdata4[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.284):/oracle/EPP/sapdata4[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(2.266):/oracle/EPP/sapdata4[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.10 \n\t lvname = sapdata4lv \n\t label = /oracle/EPP/sapdata4 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.284):/oracle/EPP/sapdata4[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.284):/oracle/EPP/sapdata4[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.285):/oracle/EPP/sapdata4[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(2.266):/oracle/EPP/sapdata4[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.10 \n\t lvname = sapdata4lv \n\t label = /oracle/EPP/sapdata4 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.286):/oracle/EPP/sapdata4[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.287):/oracle/EPP/sapdata4[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(2.288):/oracle/EPP/sapdata4[fs_mount:115] clodmget -q 'name = sapdata4lv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(2.292):/oracle/EPP/sapdata4[fs_mount:115] CuAt_label=/oracle/EPP/sapdata4 
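Every fs_mount pass also runs the clwparroot block (seen again just below for sapdata4) to decide whether the mount belongs inside a WPAR. Reduced to the path this trace exercises, the whole block collapses to one ODM lookup:

    # loadWparName: ask the HACMPresource ODM class for a WPAR_NAME resource
    wparName=$(clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource)

    # Empty answer: the resource group is not WPAR-enabled, so clwparroot
    # exits 0 printing nothing, WPAR_ROOT stays empty, and the filesystem is
    # mounted at its normal mount point.
    [[ -z $wparName ]] && exit 0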
+epprd_rg:cl_activate_fs(2.292):/oracle/EPP/sapdata4[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/sapdata4 from /etc/filesystems +epprd_rg:cl_activate_fs(2.292):/oracle/EPP/sapdata4[fs_mount:119] : should match /oracle/EPP/sapdata4 from CuAt ODM and /oracle/EPP/sapdata4 from the LVCB +epprd_rg:cl_activate_fs(2.292):/oracle/EPP/sapdata4[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.292):/oracle/EPP/sapdata4[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(2.292):/oracle/EPP/sapdata4[fs_mount:123] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:cl_activate_fs(2.292):/oracle/EPP/sapdata4[fs_mount:128] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:cl_activate_fs(2.292):/oracle/EPP/sapdata4[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.292):/oracle/EPP/sapdata4[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.292):/oracle/EPP/sapdata4[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.312):/oracle/EPP/sapdata4[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.312):/oracle/EPP/sapdata4[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.312):/oracle/EPP/sapdata4[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_activate_fs(2.313):/oracle/EPP/sapdata4[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.313):/oracle/EPP/sapdata4[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.338):/oracle/EPP/sapdata4[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.340):/oracle/EPP/sapdata4[amlog_trace:319] 
DATE=2023-01-28T19:50:46.348194 +epprd_rg:cl_activate_fs(2.340):/oracle/EPP/sapdata4[amlog_trace:320] echo '|2023-01-28T19:50:46.348194|INFO: Activating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_activate_fs(2.340):/oracle/EPP/sapdata4[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.340):/oracle/EPP/sapdata4[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.343):/oracle/EPP/sapdata4[fs_mount:162] : Try to mount filesystem /oracle/EPP/sapdata4 at Jan 28 19:50:46.000 +epprd_rg:cl_activate_fs(2.343):/oracle/EPP/sapdata4[fs_mount:163] mount /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(2.355):/oracle/EPP/sapdata4[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.355):/oracle/EPP/sapdata4[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.355):/oracle/EPP/sapdata4[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(2.355):/oracle/EPP/sapdata4[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/sapdata4' +epprd_rg:cl_activate_fs(2.355):/oracle/EPP/sapdata4[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.356):/oracle/EPP/sapdata4[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.381):/oracle/EPP/sapdata4[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.384):/oracle/EPP/sapdata4[amlog_trace:319] DATE=2023-01-28T19:50:46.391462 +epprd_rg:cl_activate_fs(2.384):/oracle/EPP/sapdata4[amlog_trace:320] echo '|2023-01-28T19:50:46.391462|INFO: Activating Filesystems completed|/oracle/EPP/sapdata4' +epprd_rg:cl_activate_fs(2.384):/oracle/EPP/sapdata4[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.384):/oracle/EPP/sapdata4[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.384):/oracle/EPP/sapdata4[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(2.384):/oracle/EPP/sapdata4[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.384):/oracle/EPP/sapdata4[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.384):/oracle/EPP/sapdata4[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.384):/oracle/EPP/sapdata4[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.384):/oracle/EPP/sapdata4[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.384):/oracle/EPP/sapdata4[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.384):/oracle/EPP/sapdata4[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.384):/oracle/EPP/sapdata4[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.385):/oracle/EPP/sapdata4[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.386):/oracle/EPP/sapdata4[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.387):/oracle/EPP/sapdata4[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.387):/oracle/EPP/sapdata4[fs_mount:236] IFS=. 
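The PS4_LOOP records show activate_fs_process_group walking its filesystem list and, because recovery is sequential here, calling fs_mount in the foreground each time. A sketch of that dispatch under assumed names ($filesystems and $method are not visible in this trace; only the literal [[ sequential == parallel ]] test and the foreground fs_mount call are):

    for FS in $filesystems; do
        PS4_LOOP=$FS                           # tag trace output with the mount point
        if [[ $method == parallel ]]; then
            # parallel recovery would background each mount (assumed form)
            fs_mount $FS fsck $TMP_FILENAME &
        else
            # serial recovery: mount in the foreground, as in this trace
            fs_mount $FS fsck $TMP_FILENAME
        fi
    done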
+epprd_rg:cl_activate_fs(2.388):/oracle/EPP/sapdata4[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.388):/oracle/EPP/sapdata4[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.388):/oracle/EPP/sapdata4[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.388):/oracle/EPP/sapdata4[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.388):/oracle/EPP/sapdata4[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.388):/oracle/EPP/sapdata4[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(2.266):/oracle/EPP/sapdata4[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.10 \n\t lvname = sapdata4lv \n\t label = /oracle/EPP/sapdata4 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.388):/oracle/EPP/sapdata4[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.388):/sapmnt[activate_fs_process_group:527] PS4_LOOP=/sapmnt +epprd_rg:cl_activate_fs(2.388):/sapmnt[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(2.388):/sapmnt[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(2.388):/sapmnt[activate_fs_process_group:540] fs_mount /sapmnt fsck epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:69] FS=/sapmnt +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:81] : Here check to see if the information in /etc/filesystems for /sapmnt +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:84] : point in /etc/filesystems. 
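The VRMF arithmetic above is how fs_mount decides whether this AIX level supports mountguard: the release, modification and fix fields are zero-filled with typeset -Z so that 7.2.5.102 becomes the comparable integer 702005102. The same padding in isolation, as a sketch assuming ksh93 and the bos.rte.filesystem fileset queried in the trace:

    typeset -li V R M F VRMF=0
    typeset -Z2 R        # release zero-filled to 2 digits
    typeset -Z3 M        # modification zero-filled to 3 digits
    typeset -Z3 F        # fix zero-filled to 3 digits
    lslpp -lcqOr bos.rte.filesystem | cut -f3 -d: | IFS=. read V R M F
    VRMF=$V$R$M$F        # e.g. 7.2.5.102 -> 702005102
    if (( V == 6 && VRMF >= 601007000 )) || (( V == 7 && VRMF >= 701001000 )); then
        : # mountguard is available at this level, per the thresholds in the trace
    fi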
+epprd_rg:cl_activate_fs(2.388):/sapmnt[fs_mount:86] lsfs -c /sapmnt +epprd_rg:cl_activate_fs(2.389):/sapmnt[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(2.394):/sapmnt[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(2.389):/sapmnt[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/sapmnt:/dev/sapmntlv:jfs2:::20971520:rw:no:no' +epprd_rg:cl_activate_fs(2.394):/sapmnt[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(2.394):/sapmnt[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.395):/sapmnt[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(2.389):/sapmnt[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/sapmnt:/dev/sapmntlv:jfs2:::20971520:rw:no:no' +epprd_rg:cl_activate_fs(2.396):/sapmnt[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(2.397):/sapmnt[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(2.397):/sapmnt[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(2.398):/sapmnt[fs_mount:100] LV_name=sapmntlv +epprd_rg:cl_activate_fs(2.398):/sapmnt[fs_mount:101] getlvcb -T -A sapmntlv +epprd_rg:cl_activate_fs(2.399):/sapmnt[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.417):/sapmnt[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(2.400):/sapmnt[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.3 \n\t lvname = sapmntlv \n\t label = /sapmnt \n\t machine id = 44AF14B00 \n\t number lps = 20 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:41 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.417):/sapmnt[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.417):/sapmnt[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.418):/sapmnt[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(2.400):/sapmnt[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.3 \n\t lvname = sapmntlv \n\t label = /sapmnt \n\t machine id = 44AF14B00 \n\t number lps = 20 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:41 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.419):/sapmnt[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.420):/sapmnt[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(2.421):/sapmnt[fs_mount:115] clodmget -q 'name = sapmntlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(2.425):/sapmnt[fs_mount:115] CuAt_label=/sapmnt +epprd_rg:cl_activate_fs(2.425):/sapmnt[fs_mount:118] : At this point, if things are working correctly, /sapmnt from /etc/filesystems +epprd_rg:cl_activate_fs(2.425):/sapmnt[fs_mount:119] : should match /sapmnt from CuAt ODM and /sapmnt from the LVCB +epprd_rg:cl_activate_fs(2.425):/sapmnt[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.425):/sapmnt[fs_mount:121] : were done in clvaryonvg. 
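The block above cross-checks three copies of the same fact: the mount point recorded in /etc/filesystems (via lsfs), the label in the on-disk LVCB (via getlvcb), and the label attribute in the CuAt ODM. A standalone rendering of that check for one mount point, assuming the same utilities the trace uses:

    FS=/sapmnt
    # device field from the colon-separated lsfs output: MountPoint:Device:Vfs:...
    dev=$(lsfs -c $FS 2>/dev/null | tail -1 | cut -d: -f2)
    lv=${dev##*/}                      # /dev/sapmntlv -> sapmntlv
    # label recorded in the logical volume control block on disk
    lvcb=$(getlvcb -T -A $lv 2>/dev/null | awk -F '=' '/label/ {gsub(/[ \t]/,"",$2); print $2}')
    # label recorded in the CuAt ODM
    odm=$(clodmget -q "name = $lv and attribute = label" -f value -n CuAt)
    [[ $lvcb == "$FS" && $odm == "$FS" ]] || print "label mismatch for $FS: LVCB=$lvcb ODM=$odm"

No recovery is attempted on a mismatch here either; as the trace comments say, clvaryonvg already made its best effort.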
+epprd_rg:cl_activate_fs(2.425):/sapmnt[fs_mount:123] [[ /sapmnt != /sapmnt ]] +epprd_rg:cl_activate_fs(2.425):/sapmnt[fs_mount:128] [[ /sapmnt != /sapmnt ]] +epprd_rg:cl_activate_fs(2.425):/sapmnt[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.425):/sapmnt[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.425):/sapmnt[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.445):/sapmnt[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.445):/sapmnt[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.445):/sapmnt[fs_mount:160] amlog_trace '' 'Activating Filesystem|/sapmnt' +epprd_rg:cl_activate_fs(2.445):/sapmnt[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.446):/sapmnt[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.470):/sapmnt[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.473):/sapmnt[amlog_trace:319] DATE=2023-01-28T19:50:46.480817 +epprd_rg:cl_activate_fs(2.473):/sapmnt[amlog_trace:320] echo '|2023-01-28T19:50:46.480817|INFO: Activating Filesystem|/sapmnt' +epprd_rg:cl_activate_fs(2.473):/sapmnt[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.473):/sapmnt[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.476):/sapmnt[fs_mount:162] : Try to mount filesystem /sapmnt at Jan 28 19:50:46.000 +epprd_rg:cl_activate_fs(2.476):/sapmnt[fs_mount:163] mount /sapmnt +epprd_rg:cl_activate_fs(2.487):/sapmnt[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.487):/sapmnt[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.487):/sapmnt[fs_mount:220] : if we are 
running on an AIX level that supports it +epprd_rg:cl_activate_fs(2.487):/sapmnt[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/sapmnt' +epprd_rg:cl_activate_fs(2.487):/sapmnt[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.488):/sapmnt[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.513):/sapmnt[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.515):/sapmnt[amlog_trace:319] DATE=2023-01-28T19:50:46.523129 +epprd_rg:cl_activate_fs(2.515):/sapmnt[amlog_trace:320] echo '|2023-01-28T19:50:46.523129|INFO: Activating Filesystems completed|/sapmnt' +epprd_rg:cl_activate_fs(2.515):/sapmnt[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.515):/sapmnt[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.515):/sapmnt[fs_mount:226] : Each of the V, R, M and F fields is padded to fixed length, +epprd_rg:cl_activate_fs(2.515):/sapmnt[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.515):/sapmnt[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.515):/sapmnt[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.515):/sapmnt[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.515):/sapmnt[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.515):/sapmnt[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.515):/sapmnt[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.516):/sapmnt[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.517):/sapmnt[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.517):/sapmnt[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.519):/sapmnt[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.519):/sapmnt[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(2.519):/sapmnt[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.519):/sapmnt[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.519):/sapmnt[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.519):/sapmnt[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.519):/sapmnt[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.519):/sapmnt[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(2.400):/sapmnt[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.3 \n\t lvname = sapmntlv \n\t label = /sapmnt \n\t machine id = 44AF14B00 \n\t number lps = 20 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:41 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.520):/sapmnt[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.520):/usr/sap[activate_fs_process_group:527] PS4_LOOP=/usr/sap +epprd_rg:cl_activate_fs(2.520):/usr/sap[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(2.520):/usr/sap[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(2.520):/usr/sap[activate_fs_process_group:540] fs_mount /usr/sap fsck epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:69] FS=/usr/sap +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:69] typeset FS
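In all the mounts so far the LVCB already carries mountguard=yes in its fs stanza, so the [[ ... != *mountguard=yes* ]] test fails and fs_mount returns without touching the volume group. On the untraced branch, where the attribute is absent, the one-time step the comments describe would be an attribute change; a hedged sketch, where the chfs invocation is an assumption and not taken from this trace:

    FS=/sapmnt
    lv=sapmntlv                        # derived earlier from the lsfs -c output
    if [[ $(getlvcb -T -A $lv 2>&1) != *mountguard=yes* ]]; then
        # guard JFS2 against double mounts from another node; this rewrites
        # the LVCB and bumps the VG timestamp, hence 'run once' in the trace
        chfs -a mountguard=yes $FS
    fi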
+epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp26739036 +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:81] : Here check to see if the information in /etc/filesystems for /usr/sap +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(2.520):/usr/sap[fs_mount:86] lsfs -c /usr/sap +epprd_rg:cl_activate_fs(2.521):/usr/sap[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(2.526):/usr/sap[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(2.521):/usr/sap[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/usr/sap:/dev/saplv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.526):/usr/sap[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(2.526):/usr/sap[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.527):/usr/sap[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(2.521):/usr/sap[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/usr/sap:/dev/saplv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.528):/usr/sap[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(2.529):/usr/sap[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(2.529):/usr/sap[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(2.530):/usr/sap[fs_mount:100] LV_name=saplv +epprd_rg:cl_activate_fs(2.530):/usr/sap[fs_mount:101] getlvcb -T -A saplv +epprd_rg:cl_activate_fs(2.531):/usr/sap[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.549):/usr/sap[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(2.531):/usr/sap[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.2 \n\t lvname = saplv \n\t label = /usr/sap \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:37 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.549):/usr/sap[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.549):/usr/sap[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.550):/usr/sap[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(2.531):/usr/sap[fs_mount:101] 
LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.2 \n\t lvname = saplv \n\t label = /usr/sap \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:37 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.551):/usr/sap[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.552):/usr/sap[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(2.553):/usr/sap[fs_mount:115] clodmget -q 'name = saplv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(2.556):/usr/sap[fs_mount:115] CuAt_label=/usr/sap +epprd_rg:cl_activate_fs(2.556):/usr/sap[fs_mount:118] : At this point, if things are working correctly, /usr/sap from /etc/filesystems +epprd_rg:cl_activate_fs(2.557):/usr/sap[fs_mount:119] : should match /usr/sap from CuAt ODM and /usr/sap from the LVCB +epprd_rg:cl_activate_fs(2.557):/usr/sap[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.557):/usr/sap[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(2.557):/usr/sap[fs_mount:123] [[ /usr/sap != /usr/sap ]] +epprd_rg:cl_activate_fs(2.557):/usr/sap[fs_mount:128] [[ /usr/sap != /usr/sap ]] +epprd_rg:cl_activate_fs(2.557):/usr/sap[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.557):/usr/sap[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.557):/usr/sap[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.577):/usr/sap[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.577):/usr/sap[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.577):/usr/sap[fs_mount:160] amlog_trace '' 'Activating Filesystem|/usr/sap' +epprd_rg:cl_activate_fs(2.577):/usr/sap[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.578):/usr/sap[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.602):/usr/sap[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.605):/usr/sap[amlog_trace:319] DATE=2023-01-28T19:50:46.612837 +epprd_rg:cl_activate_fs(2.605):/usr/sap[amlog_trace:320] echo '|2023-01-28T19:50:46.612837|INFO: Activating Filesystem|/usr/sap' +epprd_rg:cl_activate_fs(2.605):/usr/sap[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.605):/usr/sap[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.608):/usr/sap[fs_mount:162] : Try to mount filesystem /usr/sap at Jan 28 19:50:46.000 +epprd_rg:cl_activate_fs(2.608):/usr/sap[fs_mount:163] mount /usr/sap +epprd_rg:cl_activate_fs(2.620):/usr/sap[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.620):/usr/sap[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.620):/usr/sap[fs_mount:220] : if we are running on an AIX level that supports it +epprd_rg:cl_activate_fs(2.620):/usr/sap[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/usr/sap' +epprd_rg:cl_activate_fs(2.620):/usr/sap[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.621):/usr/sap[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.646):/usr/sap[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.648):/usr/sap[amlog_trace:319] DATE=2023-01-28T19:50:46.656226
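The clwparroot digression repeated before every mount answers one question: does this resource group live in a WPAR, and if so where is its root, so the mount point can be prefixed? Here bos.wpars is installed but no WPAR_NAME resource exists, so loadWparName returns an empty name and WPAR_ROOT stays empty. A minimal sketch of that decision, assuming the clodmget query shown in the trace (the 'group = ...' qualifier is an added assumption for clusters with several resource groups):

    # print the WPAR name configured for a resource group, if any
    function wpar_name_for_rg
    {
        typeset rg=$1
        clodmget -q "name = WPAR_NAME and group = $rg" -f value -n HACMPresource 2>/dev/null
    }

    wname=$(wpar_name_for_rg epprd_rg)
    if [[ -z $wname ]]; then
        : # no WPAR: mount at the global path, exactly as this trace does
    fi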
+epprd_rg:cl_activate_fs(2.648):/usr/sap[amlog_trace:320] echo '|2023-01-28T19:50:46.656226|INFO: Activating Filesystems completed|/usr/sap' +epprd_rg:cl_activate_fs(2.648):/usr/sap[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.648):/usr/sap[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.649):/usr/sap[fs_mount:226] : Each of the V, R, M and F fields is padded to fixed length, +epprd_rg:cl_activate_fs(2.649):/usr/sap[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.649):/usr/sap[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.649):/usr/sap[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.649):/usr/sap[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.649):/usr/sap[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.649):/usr/sap[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.649):/usr/sap[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.649):/usr/sap[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.650):/usr/sap[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.651):/usr/sap[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.652):/usr/sap[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.652):/usr/sap[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(2.652):/usr/sap[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.652):/usr/sap[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.653):/usr/sap[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.653):/usr/sap[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.653):/usr/sap[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.653):/usr/sap[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(2.531):/usr/sap[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.2 \n\t lvname = saplv \n\t label = /usr/sap \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:37 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.653):/usr/sap[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.653):/usr/sap[activate_fs_process_group:543] unset PS4_LOOP PS4_TIMER +epprd_rg:cl_activate_fs[activate_fs_process_group:546] : Allow any background mount operations to finish +epprd_rg:cl_activate_fs[activate_fs_process_group:548] wait +epprd_rg:cl_activate_fs[activate_fs_process_group:550] : Read cluster level Preferred read option +epprd_rg:cl_activate_fs[activate_fs_process_group:552] clodmget -n -f lvm_preferred_read HACMPcluster +epprd_rg:cl_activate_fs[activate_fs_process_group:552] cluster_pref_read=roundrobin +epprd_rg:cl_activate_fs[activate_fs_process_group:555] : Looping all file systems to update preferred read option of each lv.
+epprd_rg:cl_activate_fs[activate_fs_process_group:556] : By referring VG level preferred_read option or cluster level Preferred read option +epprd_rg:cl_activate_fs[activate_fs_process_group:560] lsfs -c /board_org +epprd_rg:cl_activate_fs[activate_fs_process_group:560] 2>& 1 +epprd_rg:cl_activate_fs[activate_fs_process_group:560] FS_info=$'+epprd_rg:cl_activate_fs[activate_fs_process_group:560] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs[activate_fs_process_group:561] RC=0 +epprd_rg:cl_activate_fs[activate_fs_process_group:562] (( 0 != 0 )) +epprd_rg:cl_activate_fs[activate_fs_process_group:574] print -- $'+epprd_rg:cl_activate_fs[activate_fs_process_group:560] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs[activate_fs_process_group:574] tail -1 +epprd_rg:cl_activate_fs[activate_fs_process_group:574] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs[activate_fs_process_group:574] IFS=: +epprd_rg:cl_activate_fs[activate_fs_process_group:575] LV_name=boardlv +epprd_rg:cl_activate_fs[activate_fs_process_group:577] grep -w 'VOLUME GROUP' +epprd_rg:cl_activate_fs[activate_fs_process_group:577] lslv -L boardlv +epprd_rg:cl_activate_fs[activate_fs_process_group:577] LC_ALL=C +epprd_rg:cl_activate_fs[activate_fs_process_group:577] volume_group='LOGICAL VOLUME: boardlv VOLUME GROUP: datavg' +epprd_rg:cl_activate_fs[activate_fs_process_group:578] volume_group=datavg +epprd_rg:cl_activate_fs[activate_fs_process_group:579] volume_group=datavg +epprd_rg:cl_activate_fs[activate_fs_process_group:581] clodmget -n -f group -q name='VOLUME_GROUP and value=datavg' HACMPresource +epprd_rg:cl_activate_fs[activate_fs_process_group:581] RGName=epprd_rg +epprd_rg:cl_activate_fs[activate_fs_process_group:584] : Get the Preferred storage read option for this VG and perform chlv command +epprd_rg:cl_activate_fs[activate_fs_process_group:586] clodmget -n -f value -q name='LVM_PREFERRED_READ and volume_group=datavg' HACMPvolumegroup +epprd_rg:cl_activate_fs[activate_fs_process_group:586] 2> /dev/null +epprd_rg:cl_activate_fs[activate_fs_process_group:586] PreferredReadOption='' +epprd_rg:cl_activate_fs[activate_fs_process_group:587] [[ -z '' ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:589] PreferredReadOption=roundrobin +epprd_rg:cl_activate_fs[activate_fs_process_group:590] [[ -z roundrobin ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:590] [[ roundrobin == roundrobin ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:593] : Both VG level and Cluster level LVM Preferred Read option chosen as roundrobin. 
+epprd_rg:cl_activate_fs[activate_fs_process_group:595] chlv -R 0 boardlv +epprd_rg:cl_activate_fs[activate_fs_process_group:596] (( 0 != 0 )) +epprd_rg:cl_activate_fs[activate_fs_process_group:600] break +epprd_rg:cl_activate_fs[activate_fs_process_group:670] : Update the resource manager with the state of the operation +epprd_rg:cl_activate_fs[activate_fs_process_group:672] ALLNOERROR=All_non_error_filesystems +epprd_rg:cl_activate_fs[activate_fs_process_group:673] cl_RMupdate resource_up All_non_error_filesystems cl_activate_fs 2023-01-28T19:50:46.970171 2023-01-28T19:50:46.974863 +epprd_rg:cl_activate_fs[activate_fs_process_group:676] : And harvest any status from the background mount operations +epprd_rg:cl_activate_fs[activate_fs_process_group:678] [[ -f /tmp/epprd_rg_activate_fs.tmp26739036 ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:688] return 0 +epprd_rg:cl_activate_fs[activate_fs_process_resources:767] RC=0 +epprd_rg:cl_activate_fs[activate_fs_process_resources:768] (( 0 != 0 && 0 == 0 )) +epprd_rg:cl_activate_fs[activate_fs_process_resources:772] RG_FILE_SYSTEMS='' +epprd_rg:cl_activate_fs[activate_fs_process_resources:776] return 0 +epprd_rg:cl_activate_fs[851] STATUS=0 +epprd_rg:cl_activate_fs[873] return 0 +epprd_rg:process_resources(8.831)[process_file_systems:2648] RC=0 +epprd_rg:process_resources(8.831)[process_file_systems:2649] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources(8.831)[process_file_systems:2661] (( 0 != 0 )) +epprd_rg:process_resources(8.831)[process_file_systems:2687] return 0 +epprd_rg:process_resources(8.831)[3483] RC=0 +epprd_rg:process_resources(8.831)[3485] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources(8.831)[3324] true +epprd_rg:process_resources(8.831)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(8.831)[3328] set -a +epprd_rg:process_resources(8.831)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:50:46.988430 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(8.851)[3329] eval JOB_TYPE=SYNC_VGS ACTION=ACQUIRE VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' +epprd_rg:process_resources(8.851)[1] JOB_TYPE=SYNC_VGS +epprd_rg:process_resources(8.851)[1] ACTION=ACQUIRE +epprd_rg:process_resources(8.851)[1] VOLUME_GROUPS=datavg +epprd_rg:process_resources(8.851)[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources(8.851)[3330] RC=0 +epprd_rg:process_resources(8.851)[3331] set +a +epprd_rg:process_resources(8.851)[3333] (( 0 != 0 )) +epprd_rg:process_resources(8.851)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(8.851)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(8.851)[3343] export GROUPNAME +epprd_rg:process_resources(8.851)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(8.851)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(8.851)[3360] [[ SYNC_VGS == RELEASE ]] +epprd_rg:process_resources(8.851)[3360] [[ SYNC_VGS == ONLINE ]] +epprd_rg:process_resources(8.851)[3474] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources(8.851)[3476] sync_volume_groups +epprd_rg:process_resources(8.851)[sync_volume_groups:2699] PS4_FUNC=sync_volume_groups +epprd_rg:process_resources(8.851)[sync_volume_groups:2699] typeset PS4_FUNC +epprd_rg:process_resources(8.851)[sync_volume_groups:2700] [[ high == high ]] 
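The chlv -R 0 above is the roundrobin case: HACMPvolumegroup holds no LVM_PREFERRED_READ override for datavg, so the cluster-wide lvm_preferred_read=roundrobin wins and the preferred-read copy of each LV is reset to 0 (no preferred mirror copy). The decision path, restated as a sketch with the same ODM queries the trace performs:

    lv=boardlv
    vg=$(lslv -L $lv | awk '/VOLUME GROUP:/ {print $NF}')
    # VG-level override, if the administrator set one
    pref=$(clodmget -n -f value -q "name = LVM_PREFERRED_READ and volume_group = $vg" HACMPvolumegroup 2>/dev/null)
    # otherwise fall back to the cluster-wide setting
    [[ -z $pref ]] && pref=$(clodmget -n -f lvm_preferred_read HACMPcluster)
    if [[ $pref == roundrobin ]]; then
        chlv -R 0 $lv      # 0 disables the preferred copy; reads rotate across mirrors
    fi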
+epprd_rg:process_resources(8.851)[sync_volume_groups:2700] set -x +epprd_rg:process_resources(8.851)[sync_volume_groups:2701] STAT=0 +epprd_rg:process_resources(8.851)[sync_volume_groups:2704] export GROUPNAME +epprd_rg:process_resources(8.852)[sync_volume_groups:2706] get_list_head datavg +epprd_rg:process_resources(8.853)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(8.853)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(8.853)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(8.853)[get_list_head:60] set -x +epprd_rg:process_resources(8.854)[get_list_head:61] echo datavg +epprd_rg:process_resources(8.854)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(8.854)[get_list_head:61] IFS=: +epprd_rg:process_resources(8.855)[get_list_head:62] echo datavg +epprd_rg:process_resources(8.855)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(8.852)[sync_volume_groups:2706] read LIST_OF_VOLUME_GROUPS_FOR_RG +epprd_rg:process_resources(8.859)[sync_volume_groups:2707] get_list_tail datavg +epprd_rg:process_resources(8.859)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(8.860)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(8.860)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(8.860)[get_list_tail:68] set -x +epprd_rg:process_resources(8.861)[get_list_tail:69] echo datavg +epprd_rg:process_resources(8.861)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(8.861)[get_list_tail:69] IFS=: +epprd_rg:process_resources(8.861)[get_list_tail:70] echo +epprd_rg:process_resources(8.860)[sync_volume_groups:2707] read VOLUME_GROUPS +epprd_rg:process_resources(8.862)[sync_volume_groups:2710] : Sync the active volume groups +epprd_rg:process_resources(8.863)[sync_volume_groups:2712] lsvg -L -o +epprd_rg:process_resources(8.863)[sync_volume_groups:2712] 2> /tmp/lsvg.err +epprd_rg:process_resources(8.863)[sync_volume_groups:2712] sort +epprd_rg:process_resources(8.865)[sync_volume_groups:2712] 1> /tmp/lsvg.out.26542458 +epprd_rg:process_resources(8.871)[sync_volume_groups:2713] echo datavg +epprd_rg:process_resources(8.872)[sync_volume_groups:2713] tr ' ' '\n' +epprd_rg:process_resources(8.874)[sync_volume_groups:2714] comm -12 /tmp/lsvg.out.26542458 - +epprd_rg:process_resources(8.874)[sync_volume_groups:2714] sort +epprd_rg:process_resources(8.879)[sync_volume_groups:2718] [[ -s /tmp/lsvg.err ]] +epprd_rg:process_resources(8.880)[sync_volume_groups:2723] rm -f /tmp/lsvg.out.26542458 /tmp/lsvg.err +epprd_rg:process_resources(8.880)[sync_volume_groups:2716] cl_sync_vgs datavg +epprd_rg:process_resources(8.884)[sync_volume_groups:2732] unset AM_SYNC_CALLED_BY +epprd_rg:process_resources(8.884)[sync_volume_groups:2734] return 0 +epprd_rg:process_resources(8.884)[3324] true +epprd_rg:process_resources(8.884)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(8.884)[3328] set -a +epprd_rg:process_resources(8.884)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa +epprd_rg:cl_sync_vgs[303] version=1.24.1.4 +epprd_rg:cl_sync_vgs[306] (( 1 == 0 )) +epprd_rg:cl_sync_vgs[312] : syncing 4 stale PPs at a time seems to be a win most of the time, but +epprd_rg:cl_sync_vgs[313] : we honor the NUM_PARALLEL_LPS value from /etc/environment, as does +epprd_rg:cl_sync_vgs[314] : 
syncvg. +epprd_rg:cl_sync_vgs[316] syncflag='' +epprd_rg:cl_sync_vgs[316] export syncflag +epprd_rg:cl_sync_vgs[317] PS4_LOOP='' +epprd_rg:cl_sync_vgs[317] export PS4_LOOP +epprd_rg:cl_sync_vgs[318] typeset -i npl +epprd_rg:cl_sync_vgs[319] grep -q ^NUM_PARALLEL_LPS= /etc/environment 2023-01-28T19:50:47.042228 clrgpa +epprd_rg:cl_sync_vgs[321] syncflag=-P4 +epprd_rg:cl_sync_vgs[328] echo 'NOTE: While the sync is going on, volume group can be used' NOTE: While the sync is going on, volume group can be used +epprd_rg:cl_sync_vgs[331] : For GLVM volume groups, read PARALLEL LPS count from HACMPresource if it is set from GUI, +epprd_rg:cl_sync_vgs[332] : else, read from environment variables, if it is not set use 32 as default value. +epprd_rg:cl_sync_vgs[334] clodmget -q name='GMVG_REP_RESOURCE and value=datavg' -f group HACMPresource +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:cl_sync_vgs[334] 2> /dev/null +epprd_rg:process_resources(8.898)[3329] eval JOB_TYPE=EXPORT_FILESYSTEMS ACTION=ACQUIRE EXPORT_FILE_SYSTEMS='"/board_org,/sapmnt/EPP"' EXPORT_FILE_SYSTEMS_V4='""' RESOURCE_GROUPS='"epprd_rg' '"' STABLE_STORAGE_PATH='""' IP_LABELS='"epprd:epprda:epprds"' DAEMONS='"NFS' 'RPCLOCKD"' +epprd_rg:process_resources(8.898)[1] JOB_TYPE=EXPORT_FILESYSTEMS +epprd_rg:process_resources(8.898)[1] ACTION=ACQUIRE +epprd_rg:process_resources(8.898)[1] EXPORT_FILE_SYSTEMS=/board_org,/sapmnt/EPP +epprd_rg:process_resources(8.898)[1] EXPORT_FILE_SYSTEMS_V4='' +epprd_rg:process_resources(8.898)[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources(8.898)[1] STABLE_STORAGE_PATH='' +epprd_rg:process_resources(8.898)[1] IP_LABELS=epprd:epprda:epprds +epprd_rg:process_resources(8.898)[1] DAEMONS='NFS RPCLOCKD' +epprd_rg:process_resources(8.898)[3330] RC=0 +epprd_rg:process_resources(8.898)[3331] set +a +epprd_rg:process_resources(8.898)[3333] (( 0 != 0 )) +epprd_rg:process_resources(8.898)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(8.898)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(8.898)[3343] export GROUPNAME +epprd_rg:process_resources(8.898)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(8.898)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(8.898)[3360] [[ EXPORT_FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources(8.898)[3360] [[ EXPORT_FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources(8.899)[3595] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources(8.899)[3597] export_filesystems +epprd_rg:process_resources(8.899)[export_filesystems:1621] PS4_FUNC=export_filesystems +epprd_rg:process_resources(8.899)[export_filesystems:1621] typeset PS4_FUNC +epprd_rg:process_resources(8.899)[export_filesystems:1622] [[ high == high ]] +epprd_rg:process_resources(8.899)[export_filesystems:1622] set -x +epprd_rg:process_resources(8.899)[export_filesystems:1623] STAT=0 +epprd_rg:process_resources(8.899)[export_filesystems:1624] NFSSTOPPED=0 +epprd_rg:process_resources(8.899)[export_filesystems:1629] [[ NFS == RPCLOCKD ]] +epprd_rg:process_resources(8.899)[export_filesystems:1629] [[ RPCLOCKD == RPCLOCKD ]] +epprd_rg:process_resources(8.899)[export_filesystems:1631] stopsrc -s rpc.lockd +epprd_rg:cl_sync_vgs[334] glvm_rg='' +epprd_rg:cl_sync_vgs[335] [[ -n '' ]] +epprd_rg:cl_sync_vgs[353] check_sync datavg +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:76] typeset vg_name +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:77] typeset vgid +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:78] typeset disklist +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:79] typeset lv_name 
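sync_volume_groups above intersects the resource group's VG list with the VGs actually varied on (two sorted streams plus comm -12) and launches cl_sync_vgs for each match; the interleaved output that follows shows it running concurrently with the export processing. Inside cl_sync_vgs, the sync width defaults to four stale LPs at a time unless NUM_PARALLEL_LPS is set in /etc/environment. A sketch of both steps, with the trailing syncvg call assumed from the script comments (no sync is actually needed in this trace):

    # VGs that are both owned by the RG and currently varied on
    lsvg -L -o 2>/tmp/lsvg.err | sort > /tmp/lsvg.out.$$
    echo datavg | tr ' ' '\n' | sort | comm -12 /tmp/lsvg.out.$$ - | while read vg
    do
        # honor NUM_PARALLEL_LPS from /etc/environment, as syncvg itself does
        if grep -q '^NUM_PARALLEL_LPS=' /etc/environment; then
            npl=$(grep '^NUM_PARALLEL_LPS=' /etc/environment | cut -d= -f2)
            syncflag=-P$npl
        else
            syncflag=-P4                  # the default picked in this trace
        fi
        # assumed eventual invocation, per the comments: syncvg $syncflag -v $vg
    done
    rm -f /tmp/lsvg.out.$$ /tmp/lsvg.err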
+epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:80] typeset -li stale_count +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:81] typeset -li mode +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:82] RC=0 +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:82] typeset -li RC +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:83] typeset site_node_list +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:84] typeset site_choice +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:86] vg_name=datavg +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:87] disklist='' +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:89] getlvodm -v datavg +epprd_rg:cl_sync_vgs(0.020):datavg[check_sync:89] vgid=00c44af100004b00000001851e9dc053 +epprd_rg:cl_sync_vgs(0.020):datavg[check_sync:92] : find disks in the VG that LVM thinks are inaccessible +epprd_rg:cl_sync_vgs(0.020):datavg[check_sync:94] lsvg -L -p datavg +epprd_rg:cl_sync_vgs(0.021):datavg[check_sync:94] LC_ALL=C 0513-044 The rpc.lockd Subsystem was requested to stop. +epprd_rg:process_resources(8.908)[export_filesystems:1633] touch /tmp/.RPCLOCKDSTOPPED +epprd_rg:process_resources(8.912)[export_filesystems:1638] : For NFSv4, cl_export_fs will use STABLE_STORAGE_PATH, which is set by +epprd_rg:process_resources(8.912)[export_filesystems:1639] : clRGPA and can have colon-separated values for multiple RGs. +epprd_rg:process_resources(8.912)[export_filesystems:1640] : We will save off clRGPA values in stable_storage_path and then extract +epprd_rg:process_resources(8.912)[export_filesystems:1641] : each RG into STABLE_STORAGE_PATH for cl_unexport_fs. +epprd_rg:process_resources(8.912)[export_filesystems:1643] stable_storage_path='' +epprd_rg:process_resources(8.912)[export_filesystems:1643] typeset stable_storage_path +epprd_rg:process_resources(8.912)[export_filesystems:1645] export NFSSTOPPED +epprd_rg:process_resources(8.912)[export_filesystems:1650] export GROUPNAME +epprd_rg:process_resources(8.913)[export_filesystems:1652] get_list_head /board_org,/sapmnt/EPP +epprd_rg:process_resources(8.913)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(8.913)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(8.913)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(8.913)[get_list_head:60] set -x +epprd_rg:process_resources(8.914)[get_list_head:61] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources(8.915)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(8.915)[get_list_head:61] IFS=: +epprd_rg:process_resources(8.916)[get_list_head:62] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources(8.917)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(8.920)[export_filesystems:1652] read LIST_OF_EXPORT_FILE_SYSTEMS_FOR_RG +epprd_rg:process_resources(8.921)[export_filesystems:1653] get_list_tail /board_org,/sapmnt/EPP +epprd_rg:process_resources(8.922)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(8.922)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(8.922)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(8.922)[get_list_tail:68] set -x +epprd_rg:process_resources(8.923)[get_list_tail:69] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources(8.923)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(8.923)[get_list_tail:69] IFS=: +epprd_rg:process_resources(8.923)[get_list_tail:70] echo +epprd_rg:process_resources(8.922)[export_filesystems:1653] read EXPORT_FILE_SYSTEMS +epprd_rg:process_resources(8.926)[export_filesystems:1654] get_list_head
+epprd_rg:process_resources(8.926)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(8.926)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(8.926)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(8.926)[get_list_head:60] set -x +epprd_rg:process_resources(8.927)[get_list_head:61] echo +epprd_rg:process_resources(8.927)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(8.927)[get_list_head:61] IFS=: +epprd_rg:process_resources(8.930)[get_list_head:62] echo +epprd_rg:process_resources(8.930)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(8.929)[export_filesystems:1654] read LIST_OF_EXPORT_FILE_SYSTEMS_V4_FOR_RG +epprd_rg:process_resources(8.934)[export_filesystems:1655] get_list_tail +epprd_rg:process_resources(8.935)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(8.935)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(8.935)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(8.935)[get_list_tail:68] set -x +epprd_rg:process_resources(8.936)[get_list_tail:69] echo +epprd_rg:process_resources(8.936)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(8.936)[get_list_tail:69] IFS=: +epprd_rg:process_resources(8.936)[get_list_tail:70] echo +epprd_rg:process_resources(8.935)[export_filesystems:1655] read EXPORT_FILE_SYSTEMS_V4 +epprd_rg:process_resources(8.939)[export_filesystems:1656] get_list_head +epprd_rg:process_resources(8.939)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(8.939)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(8.939)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(8.939)[get_list_head:60] set -x +epprd_rg:process_resources(8.941)[get_list_head:61] echo +epprd_rg:process_resources(8.940)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(8.941)[get_list_head:61] IFS=: +epprd_rg:process_resources(8.942)[get_list_head:62] echo +epprd_rg:process_resources(8.942)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(8.939)[export_filesystems:1656] read STABLE_STORAGE_PATH +epprd_rg:process_resources(8.945)[export_filesystems:1657] get_list_tail +epprd_rg:process_resources(8.946)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(8.946)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(8.946)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(8.946)[get_list_tail:68] set -x +epprd_rg:process_resources(8.947)[get_list_tail:69] echo +epprd_rg:cl_sync_vgs(0.064):datavg[check_sync:94] disklist=$'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.066):datavg[check_sync:95] print -- $'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.066):datavg[check_sync:95] grep -w missing +epprd_rg:process_resources(8.950)[get_list_tail:69] read listhead listtail 
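clRGPA hands process_resources one colon-separated field per resource group, with commas separating values inside a group; the repeated get_list_head/get_list_tail calls above peel off the current RG's portion and the remainder. Minimal equivalents, mirroring the echo/read idiom in the trace (ksh93 runs the final pipeline stage in the current shell, so the read variables persist):

    function get_list_head   # first RG's entries, commas turned into spaces
    {
        typeset listhead listtail
        echo "$1" | IFS=: read listhead listtail
        echo "$listhead" | tr ',' ' '
    }

    function get_list_tail   # everything after the first RG
    {
        typeset listhead listtail
        echo "$1" | IFS=: read listhead listtail
        echo "$listtail"
    }

    # hypothetical two-RG list for illustration:
    get_list_head '/board_org,/sapmnt/EPP:/other_fs'   # -> /board_org /sapmnt/EPP
    get_list_tail '/board_org,/sapmnt/EPP:/other_fs'   # -> /other_fs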
+epprd_rg:process_resources(8.950)[get_list_tail:69] IFS=: +epprd_rg:process_resources(8.951)[get_list_tail:70] echo +epprd_rg:cl_sync_vgs(0.068):datavg[check_sync:95] cut -f1 '-d ' +epprd_rg:process_resources(8.952)[export_filesystems:1657] read stable_storage_path +epprd_rg:process_resources(8.952)[export_filesystems:1659] cl_export_fs epprd:epprda:epprds '/board_org /sapmnt/EPP' '' +epprd_rg:cl_sync_vgs(0.071):datavg[check_sync:95] missing_disklist='' +epprd_rg:cl_sync_vgs(0.072):datavg[check_sync:96] print -- $'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.074):datavg[check_sync:96] grep -w removed +epprd_rg:cl_sync_vgs(0.076):datavg[check_sync:96] cut -f1 '-d ' +epprd_rg:cl_export_fs[102] version=%I% +epprd_rg:cl_export_fs[105] . /usr/es/sbin/cluster/events/utils/cl_nfs_utils +epprd_rg:cl_export_fs[98] PROGNAME=cl_export_fs +epprd_rg:cl_export_fs[99] [[ high == high ]] +epprd_rg:cl_export_fs[101] set -x +epprd_rg:cl_export_fs[102] version=%I +epprd_rg:cl_export_fs[105] cl_exports_data='' +epprd_rg:cl_export_fs[105] typeset cl_exports_data +epprd_rg:cl_export_fs[106] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[107] HOST=epprd:epprda:epprds +epprd_rg:cl_export_fs[108] EXPORT_V3='/board_org /sapmnt/EPP' +epprd_rg:cl_export_fs[109] EXPORT_V4='' +epprd_rg:cl_export_fs[111] STATUS=0 +epprd_rg:cl_export_fs[113] LIMIT=60 +epprd_rg:cl_export_fs[113] WAIT=1 +epprd_rg:cl_export_fs[113] TRY=0 +epprd_rg:cl_export_fs[113] typeset -li LIMIT WAIT TRY +epprd_rg:cl_export_fs[115] PROC_RES=false +epprd_rg:cl_export_fs[118] : If JOB_TYPE is set, and it does not equal to GROUP, then +epprd_rg:cl_export_fs[119] : we are processing for process_resources +epprd_rg:cl_export_fs[121] [[ EXPORT_FILESYSTEMS != 0 ]] +epprd_rg:cl_export_fs[121] [[ EXPORT_FILESYSTEMS != GROUP ]] +epprd_rg:cl_export_fs[122] PROC_RES=true +epprd_rg:cl_export_fs[125] set -u +epprd_rg:cl_export_fs[127] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[129] (( 3 < 2 || 3 > 3 )) +epprd_rg:cl_export_fs[142] DARE_EVENT=reconfig_resource_acquire +epprd_rg:cl_export_fs[145] : Check memory to see if NFSv4 exports have been configured. +epprd_rg:cl_export_fs[147] export_v4='' +epprd_rg:cl_export_fs[148] [[ -z '' ]] +epprd_rg:cl_export_fs[148] [[ rg_move == reconfig_resource_acquire ]] +epprd_rg:cl_export_fs[158] : If we do not have NFSv4 exports configured, then determine +epprd_rg:cl_export_fs[159] : the protocol versions from the HACMP exports file. +epprd_rg:cl_export_fs[161] [[ -z '' ]] +epprd_rg:cl_export_fs[161] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[163] export_v3='' +epprd_rg:cl_export_fs[169] : Get the export file for the filesystem from the exports file. +epprd_rg:cl_export_fs[170] : Only look at the part of the line preceding comments. 
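cl_export_fs decides the protocol version per export exactly as its comments describe: pull the filesystem's line out of /usr/es/sbin/cluster/etc/exports, keep only the option string after the dash, split it on commas, and look for a vers= option; when none is present, as the following trace shows for /board_org, the export is treated as NFSv3. The same classification as a sketch:

    EXPFILE=/usr/es/sbin/cluster/etc/exports
    fs=/board_org
    # options are everything after the first dash: -opt1,opt2,...
    options=$(grep "^[[:space:]]*$fs[[:space:]]" $EXPFILE | cut -d- -f2- | tr ',' ' ')
    vers_missing=1
    for opt in $options; do
        [[ $opt == vers=* ]] && vers_missing=0    # would record the listed versions
    done
    (( vers_missing )) && print "$fs defaults to NFSv3"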
+epprd_rg:cl_export_fs[173] getline_exports /board_org +epprd_rg:cl_export_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_export_fs[getline_exports:45] line='' +epprd_rg:cl_export_fs[getline_exports:45] typeset line +epprd_rg:cl_export_fs[getline_exports:46] flag=0 +epprd_rg:cl_export_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_export_fs[getline_exports:47] fs=/board_org +epprd_rg:cl_export_fs[getline_exports:49] [[ -z /board_org ]] +epprd_rg:cl_export_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_sync_vgs(0.079):datavg[check_sync:96] removed_disklist='' +epprd_rg:cl_sync_vgs(0.079):datavg[check_sync:100] : Proceed if there are some disks that LVM thinks are inaccessible +epprd_rg:cl_sync_vgs(0.079):datavg[check_sync:102] [[ -n '' ]] +epprd_rg:cl_sync_vgs(0.079):datavg[check_sync:102] [[ -n '' ]] +epprd_rg:cl_sync_vgs(0.079):datavg[check_sync:196] : sync if any LVs in the VG that have stale partitions +epprd_rg:cl_sync_vgs(0.079):datavg[check_sync:198] (( 0 == 0 )) +epprd_rg:cl_sync_vgs(0.079):datavg[check_sync:201] : A status of 2,3,5 or 7 indicates the presence of dirty and/or stale partitions +epprd_rg:cl_sync_vgs(0.079):datavg[check_sync:213] is_start_logged=0 +epprd_rg:cl_sync_vgs(0.079):datavg[check_sync:218] at_least_one_sync_success=0 +epprd_rg:cl_export_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_sync_vgs(0.080):datavg[check_sync:219] lqueryvg -g 00c44af100004b00000001851e9dc053 -L +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_export_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_export_fs[getline_exports:69] (( 1 == 0 )) +epprd_rg:cl_export_fs[getline_exports:74] [[ 0 == 1 ]] +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:59] line='/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[getline_exports:60] line='/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[getline_exports:63] [[ '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' == #* ]] +epprd_rg:cl_export_fs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_export_fs[getline_exports:68] echo '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_export_fs[getline_exports:71] flag=1 +epprd_rg:cl_export_fs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_export_fs[getline_exports:76] echo '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[getline_exports:76] grep -w '\\' +epprd_rg:cl_export_fs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_export_fs[getline_exports:81] cl_exports_data=' /board_org
-sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[getline_exports:82] break +epprd_rg:cl_export_fs[getline_exports:89] return 0 +epprd_rg:cl_export_fs[174] export_line=' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[177] : The line is of the format: filesystem -option1,option2,... +epprd_rg:cl_export_fs[178] : This will give 'option1 option2 ...' +epprd_rg:cl_export_fs[180] echo /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds +epprd_rg:cl_export_fs[180] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_export_fs[180] cut -d- -f2- +epprd_rg:cl_export_fs[181] tr , ' ' +epprd_rg:cl_export_fs[180] options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap:epprd:epprda:epprds root=epprdap:epprd:epprda:epprds ' +epprd_rg:cl_export_fs[184] : Each option can be of the format name=value, or just name. +epprd_rg:cl_export_fs[185] : We only care about the vers option. +epprd_rg:cl_export_fs[189] : Have we seen the vers 'option?' +epprd_rg:cl_export_fs[191] vers_missing=1 +epprd_rg:cl_export_fs[194] : Loop through all of the export options for this export. +epprd_rg:cl_export_fs[218] : If we did not find the vers option, then NFSv3 is the default. +epprd_rg:cl_export_fs[220] (( vers_missing )) +epprd_rg:cl_export_fs[220] export_v3=' /board_org' +epprd_rg:cl_export_fs[169] : Get the export file for the filesystem from the exports file. +epprd_rg:cl_export_fs[170] : Only look at the part of the line preceding comments. +epprd_rg:cl_export_fs[173] getline_exports /sapmnt/EPP +epprd_rg:cl_export_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_export_fs[getline_exports:45] line='' +epprd_rg:cl_export_fs[getline_exports:45] typeset line +epprd_rg:cl_export_fs[getline_exports:46] flag=0 +epprd_rg:cl_export_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_export_fs[getline_exports:47] fs=/sapmnt/EPP +epprd_rg:cl_export_fs[getline_exports:49] [[ -z /sapmnt/EPP ]] +epprd_rg:cl_export_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_sync_vgs(0.081):datavg[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.106):datavg[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.106):datavg.epprdaloglv[check_sync:221] PS4_LOOP=datavg.epprdaloglv +epprd_rg:cl_sync_vgs(0.106):datavg.epprdaloglv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.106):datavg.epprdaloglv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.106):datavg.epprdaloglv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.106):datavg.epprdaloglv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.106):datavg.epprdaloglv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.106):datavg.saplv[check_sync:221] PS4_LOOP=datavg.saplv +epprd_rg:cl_sync_vgs(0.106):datavg.saplv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.106):datavg.saplv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.106):datavg.saplv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.106):datavg.saplv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.106):datavg.saplv[check_sync:221] [[ high == high ]] 
+epprd_rg:cl_sync_vgs(0.106):datavg.sapmntlv[check_sync:221] PS4_LOOP=datavg.sapmntlv +epprd_rg:cl_sync_vgs(0.106):datavg.sapmntlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.106):datavg.sapmntlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.106):datavg.sapmntlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.106):datavg.sapmntlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.106):datavg.sapmntlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.106):datavg.oraclelv[check_sync:221] PS4_LOOP=datavg.oraclelv +epprd_rg:cl_sync_vgs(0.106):datavg.oraclelv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.106):datavg.oraclelv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.106):datavg.oraclelv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.106):datavg.oraclelv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.106):datavg.oraclelv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.106):datavg.epplv[check_sync:221] PS4_LOOP=datavg.epplv +epprd_rg:cl_sync_vgs(0.106):datavg.epplv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.106):datavg.epplv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.106):datavg.epplv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.106):datavg.epplv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.106):datavg.epplv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.106):datavg.oraarchlv[check_sync:221] PS4_LOOP=datavg.oraarchlv +epprd_rg:cl_sync_vgs(0.106):datavg.oraarchlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.106):datavg.oraarchlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.106):datavg.oraarchlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.106):datavg.oraarchlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.106):datavg.oraarchlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata1lv[check_sync:221] PS4_LOOP=datavg.sapdata1lv +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata1lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata1lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata1lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata1lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata1lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata2lv[check_sync:221] PS4_LOOP=datavg.sapdata2lv +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata2lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata2lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata2lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata2lv[check_sync:219]
read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata2lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata3lv[check_sync:221] PS4_LOOP=datavg.sapdata3lv +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata3lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata3lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata3lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata3lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata3lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata4lv[check_sync:221] PS4_LOOP=datavg.sapdata4lv +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata4lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata4lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata4lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata4lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.106):datavg.sapdata4lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.106):datavg.boardlv[check_sync:221] PS4_LOOP=datavg.boardlv +epprd_rg:cl_sync_vgs(0.106):datavg.boardlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.106):datavg.boardlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.106):datavg.boardlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.106):datavg.boardlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.106):datavg.boardlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.106):datavg.origlogAlv[check_sync:221] PS4_LOOP=datavg.origlogAlv +epprd_rg:cl_sync_vgs(0.106):datavg.origlogAlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.106):datavg.origlogAlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.106):datavg.origlogAlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.106):datavg.origlogAlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.107):datavg.origlogAlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.107):datavg.origlogBlv[check_sync:221] PS4_LOOP=datavg.origlogBlv +epprd_rg:cl_sync_vgs(0.107):datavg.origlogBlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.107):datavg.origlogBlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.107):datavg.origlogBlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.107):datavg.origlogBlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.107):datavg.origlogBlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.107):datavg.mirrlogAlv[check_sync:221] PS4_LOOP=datavg.mirrlogAlv +epprd_rg:cl_sync_vgs(0.107):datavg.mirrlogAlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.107):datavg.mirrlogAlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.107):datavg.mirrlogAlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.107):datavg.mirrlogAlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.107):datavg.mirrlogAlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.107):datavg.mirrlogBlv[check_sync:221] PS4_LOOP=datavg.mirrlogBlv +epprd_rg:cl_sync_vgs(0.107):datavg.mirrlogBlv[check_sync:222] (( 1 
!= 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.107):datavg.mirrlogBlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.107):datavg.mirrlogBlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.107):datavg.mirrlogBlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.107):datavg.mirrlogBlv[check_sync:268] [[ -n RG_MOVE ]] +epprd_rg:cl_sync_vgs(0.107):datavg.mirrlogBlv[check_sync:268] (( 0 == 1 )) +epprd_rg:cl_sync_vgs[355] exit 0 +epprd_rg:cl_export_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:68] grep -q '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_export_fs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_export_fs[getline_exports:71] flag=1 +epprd_rg:cl_export_fs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_export_fs[getline_exports:76] grep -w '\\' +epprd_rg:cl_export_fs[getline_exports:76] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_export_fs[getline_exports:81] cl_exports_data=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:82] break +epprd_rg:cl_export_fs[getline_exports:89] return 0 +epprd_rg:cl_export_fs[174] export_line=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[177] : The line is of the format: filesystem -option1,option2,... +epprd_rg:cl_export_fs[178] : This will give 'option1 option2 ...' +epprd_rg:cl_export_fs[180] echo /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[180] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_export_fs[180] cut -d- -f2- +epprd_rg:cl_export_fs[181] tr , ' ' +epprd_rg:cl_export_fs[180] options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap root=epprdap ' +epprd_rg:cl_export_fs[184] : Each option can be of the format name=value, or just name. +epprd_rg:cl_export_fs[185] : We only care about the vers option. +epprd_rg:cl_export_fs[189] : Have we seen the vers 'option?' +epprd_rg:cl_export_fs[191] vers_missing=1 +epprd_rg:cl_export_fs[194] : Loop through all of the export options for this export. +epprd_rg:cl_export_fs[218] : If we did not find the vers option, then NFSv3 is the default. +epprd_rg:cl_export_fs[220] (( vers_missing )) +epprd_rg:cl_export_fs[220] export_v3=' /board_org /sapmnt/EPP' +epprd_rg:cl_export_fs[223] EXPORT_V3=' /board_org /sapmnt/EPP' +epprd_rg:cl_export_fs[224] EXPORT_V4='' +epprd_rg:cl_export_fs[227] /usr/sbin/bootinfo -K +epprd_rg:cl_export_fs[227] KERNEL_BITS=64 +epprd_rg:cl_export_fs[229] subsystems='nfsd rpc.mountd' +epprd_rg:cl_export_fs[230] [[ -n '' ]] +epprd_rg:cl_export_fs[233] : Special processing for cross mounts of EFS keys +epprd_rg:cl_export_fs[234] : The overmount of /var/efs must be removed prior +epprd_rg:cl_export_fs[235] : to stopping or restarting NFS, since the SRC +epprd_rg:cl_export_fs[236] : operations will attempt to check the EFS enablement. 
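Looking back at the cl_sync_vgs trace interleaved above: check_sync walks every logical volume in datavg, and any LV state other than the handful of codes tested at check_sync:222 means no stale partitions, so each iteration simply continues and the script exits 0 with nothing to resynchronize. A rough, hypothetical sketch of the same idea using standard LVM commands (the real check_sync drives this from per-LV state codes, not lsvg totals):

    # Hypothetical sketch: resynchronize a volume group only when it
    # actually reports stale physical partitions.
    VG=datavg
    stale=$(lsvg -L $VG | grep 'STALE PPs:' | awk '{print $NF}')
    if [[ $stale != 0 ]]; then
        syncvg -v $VG    # refresh stale LV copies; a no-op situation in this log
    fi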
+epprd_rg:cl_export_fs[238] grep -w /var/efs +epprd_rg:cl_export_fs[238] mount +epprd_rg:cl_export_fs[238] mounted_info='' +epprd_rg:cl_export_fs[239] [[ -n '' ]] +epprd_rg:cl_export_fs[295] : Kill and restart everything in '"nfsd' 'rpc.mountd"' +epprd_rg:cl_export_fs[299] : Kill nfsd, and restart it below +epprd_rg:cl_export_fs[306] [[ nfsd == nfsd ]] +epprd_rg:cl_export_fs[307] [[ 64 == 64 ]] +epprd_rg:cl_export_fs[307] [[ -x /usr/sbin/nfs4smctl ]] +epprd_rg:cl_export_fs[308] [[ ! -s /etc/xtab ]] +epprd_rg:cl_export_fs[311] clcheck_server nfsd +epprd_rg:clcheck_server[118] [[ high == high ]] +epprd_rg:clcheck_server[118] version=1.10.4.2 +epprd_rg:clcheck_server[119] cl_get_path +epprd_rg:clcheck_server[119] HA_DIR=es +epprd_rg:clcheck_server[121] SERVER=nfsd +epprd_rg:clcheck_server[122] STATUS=0 +epprd_rg:clcheck_server[123] FATAL_ERROR=255 +epprd_rg:clcheck_server[124] retries=0 +epprd_rg:clcheck_server[124] typeset -li retries +epprd_rg:clcheck_server[126] [[ -n nfsd ]] +epprd_rg:clcheck_server[131] lssrc -s nfsd +epprd_rg:clcheck_server[131] LC_ALL=C +epprd_rg:clcheck_server[131] grep 'not on file' +epprd_rg:clcheck_server[131] wc -l +epprd_rg:clcheck_server[131] rc=' 0' +epprd_rg:clcheck_server[133] (( 0 == 1 )) +epprd_rg:clcheck_server[143] [[ 0 =~ 3 ]] +epprd_rg:clcheck_server[147] lssrc -s nfsd +epprd_rg:clcheck_server[147] 1> /dev/null 2> /dev/null +epprd_rg:clcheck_server[161] egrep 'stop|active' +epprd_rg:clcheck_server[161] lssrc -s nfsd +epprd_rg:clcheck_server[161] LC_ALL=C +epprd_rg:clcheck_server[161] check_if_down=' nfsd nfs 28377402 active' +epprd_rg:clcheck_server[166] [[ -z ' nfsd nfs 28377402 active' ]] +epprd_rg:clcheck_server[187] check_server_extended nfsd +epprd_rg:clcheck_server[check_server_extended:55] [[ high == high ]] +epprd_rg:clcheck_server[check_server_extended:55] set -x +epprd_rg:clcheck_server[check_server_extended:58] SERVER=nfsd +epprd_rg:clcheck_server[check_server_extended:58] typeset SERVER +epprd_rg:clcheck_server[check_server_extended:59] STATUS=1 +epprd_rg:clcheck_server[check_server_extended:59] typeset STATUS +epprd_rg:clcheck_server[check_server_extended:87] echo 1 +epprd_rg:clcheck_server[check_server_extended:88] return +epprd_rg:clcheck_server[187] STATUS=1 +epprd_rg:clcheck_server[188] return 1 +epprd_rg:cl_export_fs[329] : nfsv4 daemon not stopped due to existing mounts +epprd_rg:cl_export_fs[330] : Turn on NFSv4 grace periods and ignore any errors. +epprd_rg:cl_export_fs[332] chnfs -I -g on -x 1 +epprd_rg:cl_export_fs[332] ODMDIR=/etc/objrepos 0513-077 Subsystem has been changed. 0513-077 Subsystem has been changed. +epprd_rg:cl_export_fs[299] : Kill rpc.mountd, and restart it below +epprd_rg:cl_export_fs[306] [[ rpc.mountd == nfsd ]] +epprd_rg:cl_export_fs[336] : Friendly stop of rpc.mountd +epprd_rg:cl_export_fs[338] lssrc -s rpc.mountd +epprd_rg:cl_export_fs[338] LC_ALL=C +epprd_rg:cl_export_fs[338] tail +2 +epprd_rg:cl_export_fs[338] grep -qw active +epprd_rg:cl_export_fs[338] stopsrc -s rpc.mountd 0513-044 The rpc.mountd Subsystem was requested to stop. 
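The 'friendly stop' just requested is followed in the trace by a bounded wait for rpc.mountd to actually die, with a hard kill as the fallback before the daemon is started again. A condensed sketch of that stop-wait-kill pattern, built from the same commands the script uses:

    # Stop rpc.mountd, poll lssrc until it reports inoperative,
    # and fall back to kill(1) if stopsrc never takes effect.
    stopsrc -s rpc.mountd
    typeset -i TRY=0
    state=''
    while (( TRY++ < 60 )); do
        state=$(LC_ALL=C lssrc -s rpc.mountd | tail +2)
        print -- "$state" | grep -qw inoperative && break
        sleep 1    # assumed pacing between probes
    done
    if ! print -- "$state" | grep -qw inoperative; then
        # ksh runs the trailing read in the current shell, so the PID survives
        ps -eo comm,pid | grep -w rpc.mountd | grep -vw grep | read skip subsys_pid rest
        [[ $subsys_pid == +([0-9]) ]] && kill $subsys_pid
    fi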
+epprd_rg:cl_export_fs[341] : Now, wait for rpc.mountd to die +epprd_rg:cl_export_fs[343] (( TRY=0)) +epprd_rg:cl_export_fs[343] (( 0 < 60)) +epprd_rg:cl_export_fs[345] lssrc -s rpc.mountd +epprd_rg:cl_export_fs[345] LC_ALL=C +epprd_rg:cl_export_fs[345] tail +2 +epprd_rg:cl_export_fs[345] subsys_state=' rpc.mountd nfs inoperative' +epprd_rg:cl_export_fs[346] print -- ' rpc.mountd nfs inoperative' +epprd_rg:cl_export_fs[346] grep -qw inoperative +epprd_rg:cl_export_fs[348] [[ high == high ]] +epprd_rg:cl_export_fs[348] set -x +epprd_rg:cl_export_fs[349] subsys_state=inoperative +epprd_rg:cl_export_fs[350] break +epprd_rg:cl_export_fs[356] [[ high == high ]] +epprd_rg:cl_export_fs[356] set -x +epprd_rg:cl_export_fs[358] [[ inoperative != inoperative ]] +epprd_rg:cl_export_fs[382] : If stopsrc has failed to stop rpc.mountd, +epprd_rg:cl_export_fs[383] : use a real kill on the daemon +epprd_rg:cl_export_fs[385] ps -eo comm,pid +epprd_rg:cl_export_fs[385] grep -w rpc.mountd +epprd_rg:cl_export_fs[385] grep -vw grep +epprd_rg:cl_export_fs[385] read skip subsys_pid rest +epprd_rg:cl_export_fs[386] [[ '' == +([0-9]) ]] +epprd_rg:cl_export_fs[389] : If rpc.mountd has been stopped, +epprd_rg:cl_export_fs[390] : start it back up again. +epprd_rg:cl_export_fs[392] clcheck_server rpc.mountd +epprd_rg:clcheck_server[118] [[ high == high ]] +epprd_rg:clcheck_server[118] version=1.10.4.2 +epprd_rg:clcheck_server[119] cl_get_path +epprd_rg:clcheck_server[119] HA_DIR=es +epprd_rg:clcheck_server[121] SERVER=rpc.mountd +epprd_rg:clcheck_server[122] STATUS=0 +epprd_rg:clcheck_server[123] FATAL_ERROR=255 +epprd_rg:clcheck_server[124] retries=0 +epprd_rg:clcheck_server[124] typeset -li retries +epprd_rg:clcheck_server[126] [[ -n rpc.mountd ]] +epprd_rg:clcheck_server[131] lssrc -s rpc.mountd +epprd_rg:clcheck_server[131] LC_ALL=C +epprd_rg:clcheck_server[131] grep 'not on file' +epprd_rg:clcheck_server[131] wc -l +epprd_rg:clcheck_server[131] rc=' 0' +epprd_rg:clcheck_server[133] (( 0 == 1 )) +epprd_rg:clcheck_server[143] [[ 0 =~ 3 ]] +epprd_rg:clcheck_server[147] lssrc -s rpc.mountd +epprd_rg:clcheck_server[147] 1> /dev/null 2> /dev/null +epprd_rg:clcheck_server[161] lssrc -s rpc.mountd +epprd_rg:clcheck_server[161] LC_ALL=C +epprd_rg:clcheck_server[161] egrep 'stop|active' +epprd_rg:clcheck_server[161] check_if_down='' +epprd_rg:clcheck_server[166] [[ -z '' ]] +epprd_rg:clcheck_server[171] sleep 1 +epprd_rg:clcheck_server[172] lssrc -s rpc.mountd +epprd_rg:clcheck_server[172] LC_ALL=C +epprd_rg:clcheck_server[172] egrep 'stop|active' +epprd_rg:clcheck_server[172] check_if_down='' +epprd_rg:clcheck_server[173] [[ -z '' ]] +epprd_rg:clcheck_server[177] return 0 +epprd_rg:cl_export_fs[394] [[ rpc.mountd == nfsd ]] +epprd_rg:cl_export_fs[403] : Start rpc.mountd back up again +epprd_rg:cl_export_fs[405] startsrc -s rpc.mountd 0513-059 The rpc.mountd Subsystem has been started. Subsystem PID is 28901690. +epprd_rg:cl_export_fs[406] rc=0 +epprd_rg:cl_export_fs[407] (( 0 == 0 )) +epprd_rg:cl_export_fs[409] sleep 3 +epprd_rg:cl_export_fs[410] lssrc -s rpc.mountd +epprd_rg:cl_export_fs[410] LC_ALL=C +epprd_rg:cl_export_fs[410] tail +2 +epprd_rg:cl_export_fs[410] subsys_state=' rpc.mountd nfs 28901690 active' +epprd_rg:cl_export_fs[413] (( 0 != 0 )) +epprd_rg:cl_export_fs[413] print -- ' rpc.mountd nfs 28901690 active' +epprd_rg:cl_export_fs[413] grep -qw active +epprd_rg:cl_export_fs[431] : Set the NFSv4 nfsroot parameter. 
This must be set prior to any +epprd_rg:cl_export_fs[432] : NFS exports that use the exname option, and cannot be set to a new +epprd_rg:cl_export_fs[433] : value if any exname exports already exist. This is normally done +epprd_rg:cl_export_fs[434] : at IPL, but rc.nfs is not run at boot when HACMP is installed. +epprd_rg:cl_export_fs[436] [[ -n '' ]] +epprd_rg:cl_export_fs[438] hasrv='' +epprd_rg:cl_export_fs[440] [[ -z '' ]] +epprd_rg:cl_export_fs[442] query=name='STABLE_STORAGE_PATH AND group=epprd_rg' +epprd_rg:cl_export_fs[443] odmget -q name='STABLE_STORAGE_PATH AND group=epprd_rg' HACMPresource +epprd_rg:cl_export_fs[444] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_export_fs[443] STABLE_STORAGE_PATH='' +epprd_rg:cl_export_fs[447] [[ -z '' ]] +epprd_rg:cl_export_fs[449] STABLE_STORAGE_PATH=/var/adm/nfsv4.hacmp/epprd_rg +epprd_rg:cl_export_fs[452] [[ -z '' ]] +epprd_rg:cl_export_fs[454] query=name='STABLE_STORAGE_COOKIE AND group=epprd_rg' +epprd_rg:cl_export_fs[455] odmget -q name='STABLE_STORAGE_COOKIE AND group=epprd_rg' HACMPresource +epprd_rg:cl_export_fs[456] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_export_fs[455] STABLE_STORAGE_COOKIE='' +epprd_rg:cl_export_fs[459] [[ -n epprd_rg ]] +epprd_rg:cl_export_fs[461] odmget -q 'name = SERVICE_LABEL and group = epprd_rg' HACMPresource +epprd_rg:cl_export_fs[462] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:cl_export_fs[461] SERVICE_LABEL=epprd +epprd_rg:cl_export_fs[465] primary epprd +epprd_rg:cl_export_fs[primary:55] echo epprd +epprd_rg:cl_export_fs[465] primary=epprd +epprd_rg:cl_export_fs[466] secondary epprd +epprd_rg:cl_export_fs[secondary:74] [[ -n epprd ]] +epprd_rg:cl_export_fs[secondary:74] shift +epprd_rg:cl_export_fs[secondary:75] echo '' +epprd_rg:cl_export_fs[466] secondary='' +epprd_rg:cl_export_fs[468] nfs_node_state='' +epprd_rg:cl_export_fs[471] : Determine if grace periods are enabled +epprd_rg:cl_export_fs[473] ps -eo args +epprd_rg:cl_export_fs[473] grep -w nfsd +epprd_rg:cl_export_fs[473] grep -qw -- '-gp on' +epprd_rg:cl_export_fs[476] gp=off +epprd_rg:cl_export_fs[480] : We can use an NFSv4 node if grace periods are enabled, we are running a +epprd_rg:cl_export_fs[481] : 64-bit kernel, and the nfs4smctl command exists. +epprd_rg:cl_export_fs[483] [[ off == on ]] +epprd_rg:cl_export_fs[487] rm -f '/var/adm/nfsv4.hacmp/epprd_rg/*' +epprd_rg:cl_export_fs[487] 2> /dev/null +epprd_rg:cl_export_fs[491] : If we have NFSv4 exports, then we need to configure our NFS node so that +epprd_rg:cl_export_fs[492] : we can use stable storage. Note, NFS only supports this functionality in +epprd_rg:cl_export_fs[493] : its 64-bit kernels. +epprd_rg:cl_export_fs[495] [[ -n '' ]] +epprd_rg:cl_export_fs[580] [[ '' == acquiring ]] +epprd_rg:cl_export_fs[585] ALLEXPORTS=All_exports +epprd_rg:cl_export_fs[587] : update resource manager with this action +epprd_rg:cl_export_fs[589] cl_RMupdate resource_acquiring All_exports cl_export_fs 2023-01-28T19:50:52.381372 2023-01-28T19:50:52.385750 +epprd_rg:cl_export_fs[592] : Build a list of all filesystems that need to be exported, irrespective of +epprd_rg:cl_export_fs[593] : the protocol version. Since some filesystems may be exported with multiple +epprd_rg:cl_export_fs[594] : versions, remove any duplicates. 
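Several values in this stretch (STABLE_STORAGE_PATH, STABLE_STORAGE_COOKIE, SERVICE_LABEL) come straight out of the HACMPresource ODM class, with sed peeling the quoted value from odmget's stanza output. A sketch of that recurring lookup pattern:

    # Fetch one resource attribute for a resource group from the ODM,
    # exactly as traced for SERVICE_LABEL above.
    query='name = SERVICE_LABEL and group = epprd_rg'
    SERVICE_LABEL=$(odmget -q "$query" HACMPresource | sed -n '/value =/s/^.*"\(.*\)".*/\1/p')
    [[ -z $SERVICE_LABEL ]] && print -u2 'no service label configured for epprd_rg'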
+epprd_rg:cl_export_fs[596] echo /board_org /sapmnt/EPP +epprd_rg:cl_export_fs[596] tr ' ' '\n' +epprd_rg:cl_export_fs[596] sort -u +epprd_rg:cl_export_fs[596] FILESYSTEM_LIST=$'/board_org\n/sapmnt/EPP' +epprd_rg:cl_export_fs[599] : Loop through all of the filesystems we need to export ... +epprd_rg:cl_export_fs[603] v3='' +epprd_rg:cl_export_fs[604] v4='' +epprd_rg:cl_export_fs[605] root=epprd:epprda:epprds +epprd_rg:cl_export_fs[606] new_options='' +epprd_rg:cl_export_fs[607] export_file_line='' +epprd_rg:cl_export_fs[608] USING_EXPORTS_FILE=0 +epprd_rg:cl_export_fs[609] export_lines='' +epprd_rg:cl_export_fs[610] otheroption='' +epprd_rg:cl_export_fs[613] : Get the export line from exportfs for this export +epprd_rg:cl_export_fs[615] exportfs +epprd_rg:cl_export_fs[615] grep '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_export_fs[615] export_line='' +epprd_rg:cl_export_fs[617] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[620] : Get the export file for the filesystem from the exports file. +epprd_rg:cl_export_fs[621] : Only look at the part of the line preceding comments. +epprd_rg:cl_export_fs[624] getline_exports /board_org +epprd_rg:cl_export_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_export_fs[getline_exports:45] line='' +epprd_rg:cl_export_fs[getline_exports:45] typeset line +epprd_rg:cl_export_fs[getline_exports:46] flag=0 +epprd_rg:cl_export_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_export_fs[getline_exports:47] fs=/board_org +epprd_rg:cl_export_fs[getline_exports:49] [[ -z /board_org ]] +epprd_rg:cl_export_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_export_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_export_fs[getline_exports:69] (( 1 == 0 )) +epprd_rg:cl_export_fs[getline_exports:74] [[ 0 == 1 ]] +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:59] line='/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[getline_exports:60] line='/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[getline_exports:63] [[ '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' == #* ]] +epprd_rg:cl_export_fs[getline_exports:68] echo '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_export_fs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_export_fs[getline_exports:71] flag=1 +epprd_rg:cl_export_fs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_export_fs[getline_exports:76] echo '/board_org 
-sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[getline_exports:76] grep -w '\\' +epprd_rg:cl_export_fs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_export_fs[getline_exports:81] cl_exports_data=' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[getline_exports:82] break +epprd_rg:cl_export_fs[getline_exports:89] return 0 +epprd_rg:cl_export_fs[625] export_file_line=' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[628] : If the administrator provides an entry for the filesystem in the +epprd_rg:cl_export_fs[629] : exports file then ignore the root option that was passed in on the +epprd_rg:cl_export_fs[630] : command line. +epprd_rg:cl_export_fs[632] [[ -n ' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' ]] +epprd_rg:cl_export_fs[632] root='' +epprd_rg:cl_export_fs[636] : If the filesystem currently is not exported, then get the options from +epprd_rg:cl_export_fs[637] : the exports file. We will merge these options with options specified +epprd_rg:cl_export_fs[638] : through resource group attributes to produce the actual options we will +epprd_rg:cl_export_fs[639] : provide to exportfs. +epprd_rg:cl_export_fs[641] [[ -z '' ]] +epprd_rg:cl_export_fs[643] export_line=' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[644] USING_EXPORTS_FILE=1 +epprd_rg:cl_export_fs[648] : In case of multiple exports for same filesystem +epprd_rg:cl_export_fs[649] : Process them line by line +epprd_rg:cl_export_fs[651] set +u +epprd_rg:cl_export_fs[652] oldifs=$' \t\n' +epprd_rg:cl_export_fs[653] IFS=$'\n' +epprd_rg:cl_export_fs[653] export_lines=( ' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' ) +epprd_rg:cl_export_fs[654] IFS=$' \t\n' +epprd_rg:cl_export_fs[656] [ -n ' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' ] +epprd_rg:cl_export_fs[661] : The line is of the format: filesystem -option1,option2,... +epprd_rg:cl_export_fs[662] : This will give 'option1 option2 ...' +epprd_rg:cl_export_fs[664] echo /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds +epprd_rg:cl_export_fs[664] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_export_fs[664] cut -d- -f2- +epprd_rg:cl_export_fs[665] tr , ' ' +epprd_rg:cl_export_fs[664] old_options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap:epprd:epprda:epprds root=epprdap:epprd:epprda:epprds ' +epprd_rg:cl_export_fs[668] : Each option can be of the format name=value, or just name. +epprd_rg:cl_export_fs[669] : We care about the hasrv, vers, and root options. +epprd_rg:cl_export_fs[673] : Loop through all of the export options for this export. +epprd_rg:cl_export_fs[700] [[ sec=sys:krb5p:krb5i:krb5:dh == -* ]] +epprd_rg:cl_export_fs[708] [[ sec=sys:krb5p:krb5i:krb5:dh == /board_org ]] +epprd_rg:cl_export_fs[716] echo sec=sys:krb5p:krb5i:krb5:dh +epprd_rg:cl_export_fs[716] cut -d= -f 1 +epprd_rg:cl_export_fs[716] otheroption=sec +epprd_rg:cl_export_fs[717] [[ '' == *sec* ]] +epprd_rg:cl_export_fs[724] : Merge in all remaining options. 
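The merge loop running here keeps the first occurrence of each option name: cut -d= -f1 extracts the name, and the option is appended to new_options only if that name is not already present. A minimal sketch of the dedup-merge, assuming old_options has already been split into words:

    # Merge export options, skipping any option whose name (the part
    # before '=') already appears in new_options.
    new_options=''
    for option in $old_options; do
        otheroption=$(echo $option | cut -d= -f 1)
        if [[ $new_options != *${otheroption}* ]]; then
            new_options=$new_options,$option    # note: a leading comma accumulates
        fi
    done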
+epprd_rg:cl_export_fs[726] new_options=,sec=sys:krb5p:krb5i:krb5:dh +epprd_rg:cl_export_fs[700] [[ rw == -* ]] +epprd_rg:cl_export_fs[708] [[ rw == /board_org ]] +epprd_rg:cl_export_fs[716] echo rw +epprd_rg:cl_export_fs[716] cut -d= -f 1 +epprd_rg:cl_export_fs[716] otheroption=rw +epprd_rg:cl_export_fs[717] [[ ,sec=sys:krb5p:krb5i:krb5:dh == *rw* ]] +epprd_rg:cl_export_fs[724] : Merge in all remaining options. +epprd_rg:cl_export_fs[726] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw +epprd_rg:cl_export_fs[700] [[ access=epprdap:epprd:epprda:epprds == -* ]] +epprd_rg:cl_export_fs[708] [[ access=epprdap:epprd:epprda:epprds == /board_org ]] +epprd_rg:cl_export_fs[716] echo access=epprdap:epprd:epprda:epprds +epprd_rg:cl_export_fs[716] cut -d= -f 1 +epprd_rg:cl_export_fs[716] otheroption=access +epprd_rg:cl_export_fs[717] [[ ,sec=sys:krb5p:krb5i:krb5:dh,rw == *access* ]] +epprd_rg:cl_export_fs[724] : Merge in all remaining options. +epprd_rg:cl_export_fs[726] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds +epprd_rg:cl_export_fs[695] : Merge in the root option. +epprd_rg:cl_export_fs[697] echo root=epprdap:epprd:epprda:epprds +epprd_rg:cl_export_fs[697] cut -d= -f2- +epprd_rg:cl_export_fs[697] root=epprdap:epprd:epprda:epprds +epprd_rg:cl_export_fs[733] set -u +epprd_rg:cl_export_fs[736] : At this point, v3 and v4 are set based on what is actually exported +epprd_rg:cl_export_fs[737] : or what is configured to be exported in the exports file. +epprd_rg:cl_export_fs[740] (( USING_EXPORTS_FILE )) +epprd_rg:cl_export_fs[742] v3='' +epprd_rg:cl_export_fs[743] v4='' +epprd_rg:cl_export_fs[747] : At this point, v3 and v4 are set based on what is actually exported. +epprd_rg:cl_export_fs[748] : Now add additional versions if the resource group has them configured. +epprd_rg:cl_export_fs[752] [[ /board_org == /board_org ]] +epprd_rg:cl_export_fs[752] v3=:2:3 +epprd_rg:cl_export_fs[752] break +epprd_rg:cl_export_fs[761] : Versions 2 and 3 are the default versions. Some versions of AIX do +epprd_rg:cl_export_fs[762] : not support the vers export option, so only use the option if we are +epprd_rg:cl_export_fs[763] : exporting a non-default value such as 4 +epprd_rg:cl_export_fs[765] [[ -n '' ]] +epprd_rg:cl_export_fs[779] [[ -n epprdap:epprd:epprda:epprds ]] +epprd_rg:cl_export_fs[782] : If we have root privileged clients, +epprd_rg:cl_export_fs[783] : then add them to the option list.
+epprd_rg:cl_export_fs[785] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds +epprd_rg:cl_export_fs[788] [[ -n '' ]] +epprd_rg:cl_export_fs[798] : Strip off the leading comma +epprd_rg:cl_export_fs[800] echo ,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds +epprd_rg:cl_export_fs[800] cut -d, -f2- +epprd_rg:cl_export_fs[800] new_options=sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds +epprd_rg:cl_export_fs[802] [[ -z sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds ]] +epprd_rg:cl_export_fs[811] : Exporting filesystem /board_org with options sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds +epprd_rg:cl_export_fs[813] exportfs -i -o sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds /board_org +epprd_rg:cl_export_fs[814] RC=0 +epprd_rg:cl_export_fs[817] (( 0 != 0 )) +epprd_rg:cl_export_fs[603] v3='' +epprd_rg:cl_export_fs[604] v4='' +epprd_rg:cl_export_fs[605] root=epprd:epprda:epprds +epprd_rg:cl_export_fs[606] new_options='' +epprd_rg:cl_export_fs[607] export_file_line='' +epprd_rg:cl_export_fs[608] USING_EXPORTS_FILE=0 +epprd_rg:cl_export_fs[609] export_lines[0]='' +epprd_rg:cl_export_fs[610] otheroption='' +epprd_rg:cl_export_fs[613] : Get the export line from exportfs for this export +epprd_rg:cl_export_fs[615] exportfs +epprd_rg:cl_export_fs[615] grep '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_export_fs[615] export_line='' +epprd_rg:cl_export_fs[617] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[620] : Get the export file for the filesystem from the exports file. +epprd_rg:cl_export_fs[621] : Only look at the part of the line preceding comments. 
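Once the merged option string is ready, the export itself is mechanical: strip the leading comma left over from the merge, then hand the result to exportfs with -i so /etc/exports is ignored, exactly as at cl_export_fs[813]. Sketch:

    # Strip the accumulated leading comma, then export the filesystem.
    new_options=$(echo $new_options | cut -d, -f2-)
    exportfs -i -o $new_options /board_org
    RC=$?
    (( RC != 0 )) && print -u2 "exportfs of /board_org failed, rc=$RC"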
+epprd_rg:cl_export_fs[624] getline_exports /sapmnt/EPP +epprd_rg:cl_export_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_export_fs[getline_exports:45] line='' +epprd_rg:cl_export_fs[getline_exports:45] typeset line +epprd_rg:cl_export_fs[getline_exports:46] flag=0 +epprd_rg:cl_export_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_export_fs[getline_exports:47] fs=/sapmnt/EPP +epprd_rg:cl_export_fs[getline_exports:49] [[ -z /sapmnt/EPP ]] +epprd_rg:cl_export_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_export_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:68] grep -q '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_export_fs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_export_fs[getline_exports:71] flag=1 +epprd_rg:cl_export_fs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_export_fs[getline_exports:76] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:76] grep -w '\\' +epprd_rg:cl_export_fs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_export_fs[getline_exports:81] cl_exports_data=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:82] break +epprd_rg:cl_export_fs[getline_exports:89] return 0 +epprd_rg:cl_export_fs[625] export_file_line=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[628] : If the administrator provides an entry for the filesystem in the +epprd_rg:cl_export_fs[629] : exports file then ignore the root option that was passed in on the +epprd_rg:cl_export_fs[630] : command line. +epprd_rg:cl_export_fs[632] [[ -n ' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' ]] +epprd_rg:cl_export_fs[632] root='' +epprd_rg:cl_export_fs[636] : If the filesystem currently is not exported, then get the options from +epprd_rg:cl_export_fs[637] : the exports file. We will merge these options with options specified +epprd_rg:cl_export_fs[638] : through resource group attributes to produce the actual options we will +epprd_rg:cl_export_fs[639] : provide to exportfs. 
+epprd_rg:cl_export_fs[641] [[ -z '' ]] +epprd_rg:cl_export_fs[643] export_line=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[644] USING_EXPORTS_FILE=1 +epprd_rg:cl_export_fs[648] : In case of multiple exports for same filesystem +epprd_rg:cl_export_fs[649] : Process them line by line +epprd_rg:cl_export_fs[651] set +u +epprd_rg:cl_export_fs[652] oldifs=$' \t\n' +epprd_rg:cl_export_fs[653] IFS=$'\n' +epprd_rg:cl_export_fs[653] export_lines=( ' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' ) +epprd_rg:cl_export_fs[654] IFS=$' \t\n' +epprd_rg:cl_export_fs[656] [ -n ' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' ] +epprd_rg:cl_export_fs[661] : The line is of the format: filesystem -option1,option2,... +epprd_rg:cl_export_fs[662] : This will give 'option1 option2 ...' +epprd_rg:cl_export_fs[664] echo /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[664] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_export_fs[664] cut -d- -f2- +epprd_rg:cl_export_fs[665] tr , ' ' +epprd_rg:cl_export_fs[664] old_options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap root=epprdap ' +epprd_rg:cl_export_fs[668] : Each option can be of the format name=value, or just name. +epprd_rg:cl_export_fs[669] : We care about the hasrv, vers, and root options. +epprd_rg:cl_export_fs[673] : Loop through all of the export options for this export. +epprd_rg:cl_export_fs[700] [[ sec=sys:krb5p:krb5i:krb5:dh == -* ]] +epprd_rg:cl_export_fs[708] [[ sec=sys:krb5p:krb5i:krb5:dh == /sapmnt/EPP ]] +epprd_rg:cl_export_fs[716] echo sec=sys:krb5p:krb5i:krb5:dh +epprd_rg:cl_export_fs[716] cut -d= -f 1 +epprd_rg:cl_export_fs[716] otheroption=sec +epprd_rg:cl_export_fs[717] [[ '' == *sec* ]] +epprd_rg:cl_export_fs[724] : Merge in all remaining options. +epprd_rg:cl_export_fs[726] new_options=,sec=sys:krb5p:krb5i:krb5:dh +epprd_rg:cl_export_fs[700] [[ rw == -* ]] +epprd_rg:cl_export_fs[708] [[ rw == /sapmnt/EPP ]] +epprd_rg:cl_export_fs[716] cut -d= -f 1 +epprd_rg:cl_export_fs[716] echo rw +epprd_rg:cl_export_fs[716] otheroption=rw +epprd_rg:cl_export_fs[717] [[ ,sec=sys:krb5p:krb5i:krb5:dh == *rw* ]] +epprd_rg:cl_export_fs[724] : Merge in all remaining options. +epprd_rg:cl_export_fs[726] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw +epprd_rg:cl_export_fs[700] [[ access=epprdap == -* ]] +epprd_rg:cl_export_fs[708] [[ access=epprdap == /sapmnt/EPP ]] +epprd_rg:cl_export_fs[716] echo access=epprdap +epprd_rg:cl_export_fs[716] cut -d= -f 1 +epprd_rg:cl_export_fs[716] otheroption=access +epprd_rg:cl_export_fs[717] [[ ,sec=sys:krb5p:krb5i:krb5:dh,rw == *access* ]] +epprd_rg:cl_export_fs[724] : Merge in all remaining options. +epprd_rg:cl_export_fs[726] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap +epprd_rg:cl_export_fs[695] : Merge in the root option. +epprd_rg:cl_export_fs[697] echo root=epprdap +epprd_rg:cl_export_fs[697] cut -d= -f2- +epprd_rg:cl_export_fs[697] root=epprdap +epprd_rg:cl_export_fs[733] set -u +epprd_rg:cl_export_fs[736] : At this point, v3 and v4 are set based on what is actually exported +epprd_rg:cl_export_fs[737] : or what is configured to be exported in the exports file. +epprd_rg:cl_export_fs[740] (( USING_EXPORTS_FILE )) +epprd_rg:cl_export_fs[742] v3='' +epprd_rg:cl_export_fs[743] v4='' +epprd_rg:cl_export_fs[747] : At this point, v3 and v4 are set based on what is actually exported. 
+epprd_rg:cl_export_fs[748] : Now add additional versions if the resource group has them configured. +epprd_rg:cl_export_fs[752] [[ /sapmnt/EPP == /board_org ]] +epprd_rg:cl_export_fs[752] [[ /sapmnt/EPP == /sapmnt/EPP ]] +epprd_rg:cl_export_fs[752] v3=:2:3 +epprd_rg:cl_export_fs[752] break +epprd_rg:cl_export_fs[761] : Versions 2 and 3 are the default versions. Some versions of AIX do +epprd_rg:cl_export_fs[762] : not support the vers export option, so only use the option if we are +epprd_rg:cl_export_fs[763] : exporting a non-default value such as 4 +epprd_rg:cl_export_fs[765] [[ -n '' ]] +epprd_rg:cl_export_fs[779] [[ -n epprdap ]] +epprd_rg:cl_export_fs[782] : If we have root privileged clients, +epprd_rg:cl_export_fs[783] : then add them to the option list. +epprd_rg:cl_export_fs[785] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[788] [[ -n '' ]] +epprd_rg:cl_export_fs[798] : Strip off the leading comma +epprd_rg:cl_export_fs[800] echo ,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[800] cut -d, -f2- +epprd_rg:cl_export_fs[800] new_options=sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[802] [[ -z sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap ]] +epprd_rg:cl_export_fs[811] : Exporting filesystem /sapmnt/EPP with options sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[813] exportfs -i -o sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap /sapmnt/EPP +epprd_rg:cl_export_fs[814] RC=0 +epprd_rg:cl_export_fs[817] (( 0 != 0 )) +epprd_rg:cl_export_fs[834] ALLNOERREXPORT=All_nonerror_exports +epprd_rg:cl_export_fs[836] : update resource manager with results +epprd_rg:cl_export_fs[838] cl_RMupdate resource_up All_nonerror_exports cl_export_fs 2023-01-28T19:50:52.546977 2023-01-28T19:50:52.551399 +epprd_rg:cl_export_fs[840] exit 0 +epprd_rg:process_resources(14.407)[export_filesystems:1662] RC=0 +epprd_rg:process_resources(14.407)[export_filesystems:1663] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources(14.407)[export_filesystems:1669] (( 0 != 0 )) +epprd_rg:process_resources(14.408)[export_filesystems:1675] return 0 +epprd_rg:process_resources(14.408)[3324] true +epprd_rg:process_resources(14.408)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(14.408)[3328] set -a +epprd_rg:process_resources(14.408)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:50:52.565291 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(14.421)[3329] eval JOB_TYPE=TELINIT +epprd_rg:process_resources(14.421)[1] JOB_TYPE=TELINIT +epprd_rg:process_resources(14.421)[3330] RC=0 +epprd_rg:process_resources(14.421)[3331] set +a +epprd_rg:process_resources(14.421)[3333] (( 0 != 0 )) +epprd_rg:process_resources(14.421)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(14.421)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(14.421)[3343] export GROUPNAME +epprd_rg:process_resources(14.421)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(14.421)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(14.421)[3360] [[ TELINIT == RELEASE ]] +epprd_rg:process_resources(14.421)[3360] [[ TELINIT == ONLINE ]] +epprd_rg:process_resources(14.421)[3435] cl_telinit +epprd_rg:cl_telinit[178] version=%I%
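The process_resources fragments above and below all follow the same eval-driven loop: clRGPA prints shell assignments (JOB_TYPE plus any job parameters), the caller evals them under set -a so they are exported, and dispatch continues until JOB_TYPE comes back NONE. A skeleton of that loop:

    # Skeleton of the clRGPA-driven dispatch loop in process_resources.
    while true; do
        set -a                # auto-export whatever the eval assigns
        eval $(clRGPA)        # e.g. JOB_TYPE=TELINIT, or MOUNT_FILESYSTEMS plus args
        set +a
        case $JOB_TYPE in
            TELINIT)           cl_telinit ;;
            MOUNT_FILESYSTEMS) mount_nfs_filesystems MOUNT ;;
            NONE)              break ;;   # nothing left to do for this group
        esac
    done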
+epprd_rg:cl_telinit[182] TELINIT_FILE=/usr/es/sbin/cluster/.telinit +epprd_rg:cl_telinit[183] USE_TELINIT_FILE=/usr/es/sbin/cluster/.use_telinit +epprd_rg:cl_telinit[185] [[ -f /usr/es/sbin/cluster/.use_telinit ]] +epprd_rg:cl_telinit[189] USE_TELINIT=0 +epprd_rg:cl_telinit[198] [[ '' == -boot ]] +epprd_rg:cl_telinit[236] cl_lsitab clinit +epprd_rg:cl_telinit[236] 1> /dev/null 2>& 1 +epprd_rg:cl_telinit[239] : telinit a disabled +epprd_rg:cl_telinit[241] return 0 +epprd_rg:process_resources(14.442)[3324] true +epprd_rg:process_resources(14.442)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(14.442)[3328] set -a +epprd_rg:process_resources(14.443)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:50:52.599898 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(14.456)[3329] eval JOB_TYPE=MOUNT_FILESYSTEMS ACTION=ACQUIRE FILE_SYSTEMS='"/board;/board_org"' RESOURCE_GROUPS='"epprd_rg' '"' NFS_NETWORKS='""' NFS_HOSTS='""' IP_LABELS='"epprd"' +epprd_rg:process_resources(14.456)[1] JOB_TYPE=MOUNT_FILESYSTEMS +epprd_rg:process_resources(14.456)[1] ACTION=ACQUIRE +epprd_rg:process_resources(14.456)[1] FILE_SYSTEMS='/board;/board_org' +epprd_rg:process_resources(14.456)[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources(14.456)[1] NFS_NETWORKS='' +epprd_rg:process_resources(14.456)[1] NFS_HOSTS='' +epprd_rg:process_resources(14.456)[1] IP_LABELS=epprd +epprd_rg:process_resources(14.456)[3330] RC=0 +epprd_rg:process_resources(14.456)[3331] set +a +epprd_rg:process_resources(14.456)[3333] (( 0 != 0 )) +epprd_rg:process_resources(14.456)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(14.456)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(14.456)[3343] export GROUPNAME +epprd_rg:process_resources(14.456)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(14.456)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(14.456)[3360] [[ MOUNT_FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources(14.456)[3360] [[ MOUNT_FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources(14.456)[3612] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources(14.456)[3614] mount_nfs_filesystems MOUNT +epprd_rg:process_resources(14.456)[mount_nfs_filesystems:1437] PS4_FUNC=mount_nfs_filesystems +epprd_rg:process_resources(14.456)[mount_nfs_filesystems:1437] typeset PS4_FUNC +epprd_rg:process_resources(14.456)[mount_nfs_filesystems:1438] [[ high == high ]] +epprd_rg:process_resources(14.456)[mount_nfs_filesystems:1438] set -x +epprd_rg:process_resources(14.456)[mount_nfs_filesystems:1440] post_event_member=FALSE +epprd_rg:process_resources(14.456)[mount_nfs_filesystems:1444] [[ epprda == epprda ]] +epprd_rg:process_resources(14.456)[mount_nfs_filesystems:1446] post_event_member=TRUE +epprd_rg:process_resources(14.456)[mount_nfs_filesystems:1447] break +epprd_rg:process_resources(14.456)[mount_nfs_filesystems:1452] : This node will not be in the resource group so do not mount filesystems. 
+epprd_rg:process_resources(14.456)[mount_nfs_filesystems:1454] [[ TRUE == FALSE ]] +epprd_rg:process_resources(14.456)[mount_nfs_filesystems:1459] STAT=0 +epprd_rg:process_resources(14.456)[mount_nfs_filesystems:1463] export GROUPNAME +epprd_rg:process_resources(14.457)[mount_nfs_filesystems:1465] get_list_head '/board;/board_org' +epprd_rg:process_resources(14.458)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(14.458)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(14.458)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(14.458)[get_list_head:60] set -x +epprd_rg:process_resources(14.459)[get_list_head:61] echo '/board;/board_org' +epprd_rg:process_resources(14.461)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(14.461)[get_list_head:61] IFS=: +epprd_rg:process_resources(14.462)[get_list_head:62] echo '/board;/board_org' +epprd_rg:process_resources(14.463)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(14.460)[mount_nfs_filesystems:1465] read LIST_OF_FILE_SYSTEMS_FOR_RG +epprd_rg:process_resources(14.466)[mount_nfs_filesystems:1466] get_list_tail '/board;/board_org' +epprd_rg:process_resources(14.466)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(14.466)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(14.466)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(14.466)[get_list_tail:68] set -x +epprd_rg:process_resources(14.467)[get_list_tail:69] echo '/board;/board_org' +epprd_rg:process_resources(14.471)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(14.471)[get_list_tail:69] IFS=: +epprd_rg:process_resources(14.471)[get_list_tail:70] echo +epprd_rg:process_resources(14.470)[mount_nfs_filesystems:1466] read FILE_SYSTEMS +epprd_rg:process_resources(14.474)[mount_nfs_filesystems:1468] get_list_head +epprd_rg:process_resources(14.474)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(14.474)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(14.474)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(14.474)[get_list_head:60] set -x +epprd_rg:process_resources(14.476)[get_list_head:61] echo +epprd_rg:process_resources(14.476)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(14.476)[get_list_head:61] IFS=: +epprd_rg:process_resources(14.478)[get_list_head:62] echo +epprd_rg:process_resources(14.479)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(14.475)[mount_nfs_filesystems:1468] read NFS_HOST +epprd_rg:process_resources(14.484)[mount_nfs_filesystems:1469] get_list_tail +epprd_rg:process_resources(14.485)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(14.485)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(14.485)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(14.485)[get_list_tail:68] set -x +epprd_rg:process_resources(14.486)[get_list_tail:69] echo +epprd_rg:process_resources(14.488)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(14.488)[get_list_tail:69] IFS=: +epprd_rg:process_resources(14.488)[get_list_tail:70] echo +epprd_rg:process_resources(14.485)[mount_nfs_filesystems:1469] read NFS_HOSTS +epprd_rg:process_resources(14.491)[mount_nfs_filesystems:1471] get_list_head +epprd_rg:process_resources(14.491)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(14.491)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(14.491)[get_list_head:60] [[ high == high 
]] +epprd_rg:process_resources(14.491)[get_list_head:60] set -x +epprd_rg:process_resources(14.492)[get_list_head:61] echo +epprd_rg:process_resources(14.494)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(14.494)[get_list_head:61] IFS=: +epprd_rg:process_resources(14.495)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(14.497)[get_list_head:62] echo +epprd_rg:process_resources(14.493)[mount_nfs_filesystems:1471] read NFS_NETWORK +epprd_rg:process_resources(14.499)[mount_nfs_filesystems:1472] get_list_tail +epprd_rg:process_resources(14.500)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(14.500)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(14.500)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(14.500)[get_list_tail:68] set -x +epprd_rg:process_resources(14.501)[get_list_tail:69] echo +epprd_rg:process_resources(14.504)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(14.504)[get_list_tail:69] IFS=: +epprd_rg:process_resources(14.504)[get_list_tail:70] echo +epprd_rg:process_resources(14.504)[mount_nfs_filesystems:1472] read NFS_NETWORKS +epprd_rg:process_resources(14.505)[mount_nfs_filesystems:1474] get_list_head epprd +epprd_rg:process_resources(14.506)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(14.506)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(14.506)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(14.506)[get_list_head:60] set -x +epprd_rg:process_resources(14.507)[get_list_head:61] echo epprd +epprd_rg:process_resources(14.510)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(14.510)[get_list_head:61] IFS=: +epprd_rg:process_resources(14.511)[get_list_head:62] echo epprd +epprd_rg:process_resources(14.512)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(14.509)[mount_nfs_filesystems:1474] read LIST_OF_IP_LABELS_FOR_RG +epprd_rg:process_resources(14.515)[mount_nfs_filesystems:1475] get_list_tail epprd +epprd_rg:process_resources(14.516)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(14.516)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(14.516)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(14.516)[get_list_tail:68] set -x +epprd_rg:process_resources(14.517)[get_list_tail:69] echo epprd +epprd_rg:process_resources(14.520)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(14.520)[get_list_tail:69] IFS=: +epprd_rg:process_resources(14.520)[get_list_tail:70] echo +epprd_rg:process_resources(14.520)[mount_nfs_filesystems:1475] read IP_LABELS +epprd_rg:process_resources(14.522)[mount_nfs_filesystems:1477] MOUNT_FILESYSTEM='/board;/board_org' +epprd_rg:process_resources(14.522)[mount_nfs_filesystems:1478] NFSMOUNT_LABEL=epprd +epprd_rg:process_resources(14.522)[mount_nfs_filesystems:1481] : Do the required NFS_mounts. 
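The get_list_head/get_list_tail pair seen above implements colon-separated work lists: head is everything before the first colon, with commas expanded to spaces, and tail is the remainder, so each pass peels off one resource group's worth of arguments. A sketch of both helpers as traced (the two-group input is a hypothetical illustration; ksh runs the trailing read in the current shell, so the variables survive the pipeline):

    # Colon-delimited list helpers, as used by process_resources.
    get_list_head() {
        echo "$*" | IFS=: read listhead listtail
        echo "$listhead" | tr , ' '    # commas separate items within one group
    }
    get_list_tail() {
        echo "$*" | IFS=: read listhead listtail
        echo "$listtail"
    }
    get_list_head '/board;/board_org:/other_fs'    # -> /board;/board_org
    get_list_tail '/board;/board_org:/other_fs'    # -> /other_fs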
+epprd_rg:process_resources(14.522)[mount_nfs_filesystems:1484] NW_NFSMOUNT_LABEL='' +epprd_rg:process_resources(14.522)[mount_nfs_filesystems:1486] [[ -z '' ]] +epprd_rg:process_resources(14.522)[mount_nfs_filesystems:1488] NFS_HOST=epprda +epprd_rg:process_resources(14.522)[mount_nfs_filesystems:1491] NFSHOST='' +epprd_rg:process_resources(14.522)[mount_nfs_filesystems:1492] [[ -n epprda ]] +epprd_rg:process_resources(14.522)[mount_nfs_filesystems:1494] [[ -n '' ]] +epprd_rg:process_resources(14.522)[mount_nfs_filesystems:1516] [[ MOUNT == REMOUNT ]] +epprd_rg:process_resources(14.522)[mount_nfs_filesystems:1526] ping epprd 1024 1 +epprd_rg:process_resources(14.523)[mount_nfs_filesystems:1526] 1> /dev/null +epprd_rg:process_resources(14.526)[mount_nfs_filesystems:1528] NFSHOST=epprd +epprd_rg:process_resources(14.526)[mount_nfs_filesystems:1529] break +epprd_rg:process_resources(14.526)[mount_nfs_filesystems:1533] [[ -n epprd ]] +epprd_rg:process_resources(14.526)[mount_nfs_filesystems:1536] : activate_nfs will not wait for the mounts to complete +epprd_rg:process_resources(14.526)[mount_nfs_filesystems:1538] cl_activate_nfs 1 epprd '/board;/board_org' +epprd_rg:cl_activate_nfs[68] [[ high == high ]] +epprd_rg:cl_activate_nfs[68] version='1.19.4.2 $Source$' +epprd_rg:cl_activate_nfs[70] . /usr/es/sbin/cluster/events/utils/cl_nfs_utils +epprd_rg:cl_activate_nfs[98] PROGNAME=cl_activate_nfs +epprd_rg:cl_activate_nfs[99] [[ high == high ]] +epprd_rg:cl_activate_nfs[101] set -x +epprd_rg:cl_activate_nfs[102] version=%I +epprd_rg:cl_activate_nfs[105] cl_exports_data='' +epprd_rg:cl_activate_nfs[105] typeset cl_exports_data +epprd_rg:cl_activate_nfs[106] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[72] set -u +epprd_rg:cl_activate_nfs[242] grep -w ^MOUNT_WLMCNTRL_SELFMANAGE /etc/environment +epprd_rg:cl_activate_nfs[242] export eval +epprd_rg:cl_activate_nfs[244] (( 3 < 3 )) +epprd_rg:cl_activate_nfs[253] ATTEMPTS=1 +epprd_rg:cl_activate_nfs[253] typeset -li ATTEMPTS +epprd_rg:cl_activate_nfs[254] HOST=epprd +epprd_rg:cl_activate_nfs[256] shift 2 +epprd_rg:cl_activate_nfs[261] FILELIST='/board;/board_org' +epprd_rg:cl_activate_nfs[266] print '/board;/board_org' +epprd_rg:cl_activate_nfs[266] grep -q '\;/' +epprd_rg:cl_activate_nfs[271] CROSSMOUNTS=TRUE +epprd_rg:cl_activate_nfs[272] print '/board;/board_org' +epprd_rg:cl_activate_nfs[272] tr ' ' '\n' +epprd_rg:cl_activate_nfs[272] /bin/sort -k 1,1 '-t;' +epprd_rg:cl_activate_nfs[272] MOUNTLIST='/board;/board_org' +epprd_rg:cl_activate_nfs[281] ALLNFS=All_nfs_mounts +epprd_rg:cl_activate_nfs[282] cl_RMupdate resource_acquiring All_nfs_mounts cl_activate_nfs 2023-01-28T19:50:52.723384 2023-01-28T19:50:52.727759 +epprd_rg:cl_activate_nfs[289] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_activate_nfs[288] odmget -q name='RECOVERY_METHOD AND group=epprd_rg' HACMPresource +epprd_rg:cl_activate_nfs[288] METHOD=sequential +epprd_rg:cl_activate_nfs[291] odmget -q name='EXPORT_FILESYSTEM AND group=epprd_rg' HACMPresource +epprd_rg:cl_activate_nfs[291] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_activate_nfs[291] EXPORT_FILESYSTEM=$'/board_org\n/sapmnt/EPP' +epprd_rg:cl_activate_nfs[293] odmget -q name='EXPORT_FILESYSTEM_V4 AND group=epprd_rg' HACMPresource +epprd_rg:cl_activate_nfs[293] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_activate_nfs[293] EXPORT_FILESYSTEM_V4='' +epprd_rg:cl_activate_nfs[302] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[304] [[ -z '' ]] 
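The 'mountpoint;filesystem' notation probed at cl_activate_nfs[266] marks NFS cross-mounts: grep -q '\;/' detects the semicolon, the list is sorted on the mount-point field, and each entry later splits into the local mount point and the served filesystem (here /board and /board_org). A short sketch of that handling; the parameter-expansion split is an assumed equivalent of what the script does:

    # Detect and split cross-mount entries of the form 'mountpoint;filesystem'.
    FILELIST='/board;/board_org'
    if print "$FILELIST" | grep -q '\;/'; then
        CROSSMOUNTS=TRUE
        MOUNTLIST=$(print "$FILELIST" | tr ' ' '\n' | /bin/sort -k 1,1 '-t;')
        for entry in $MOUNTLIST; do
            mountpoint=${entry%%;*}    # /board
            filesystem=${entry#*;}     # /board_org
            print -- "will mount $filesystem over $mountpoint"
        done
    fi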
+epprd_rg:cl_activate_nfs[305] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_activate_nfs[308] VERSION_SOURCE=FILES +epprd_rg:cl_activate_nfs[320] [[ FILES == FILES ]] +epprd_rg:cl_activate_nfs[322] export_v3='' +epprd_rg:cl_activate_nfs[323] export_v4='' +epprd_rg:cl_activate_nfs[330] getline_exports /board_org +epprd_rg:cl_activate_nfs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_activate_nfs[getline_exports:45] line='' +epprd_rg:cl_activate_nfs[getline_exports:45] typeset line +epprd_rg:cl_activate_nfs[getline_exports:46] flag=0 +epprd_rg:cl_activate_nfs[getline_exports:46] typeset -i flag +epprd_rg:cl_activate_nfs[getline_exports:47] fs=/board_org +epprd_rg:cl_activate_nfs[getline_exports:49] [[ -z /board_org ]] +epprd_rg:cl_activate_nfs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_activate_nfs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[getline_exports:56] read -r line +epprd_rg:cl_activate_nfs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_activate_nfs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_activate_nfs[getline_exports:69] (( 1 == 0 )) +epprd_rg:cl_activate_nfs[getline_exports:74] [[ 0 == 1 ]] +epprd_rg:cl_activate_nfs[getline_exports:56] read -r line +epprd_rg:cl_activate_nfs[getline_exports:59] line='/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_activate_nfs[getline_exports:60] line='/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_activate_nfs[getline_exports:63] [[ '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' == #* ]] +epprd_rg:cl_activate_nfs[getline_exports:68] echo '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_activate_nfs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_activate_nfs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_activate_nfs[getline_exports:71] flag=1 +epprd_rg:cl_activate_nfs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_activate_nfs[getline_exports:76] echo '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_activate_nfs[getline_exports:76] grep -w '\\' +epprd_rg:cl_activate_nfs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_activate_nfs[getline_exports:81] cl_exports_data=' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_activate_nfs[getline_exports:82] break +epprd_rg:cl_activate_nfs[getline_exports:89] return 0 +epprd_rg:cl_activate_nfs[331] export_line=' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_activate_nfs[336] echo /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds 
+epprd_rg:cl_activate_nfs[336] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_activate_nfs[337] cut -d- -f2- +epprd_rg:cl_activate_nfs[337] tr , ' ' +epprd_rg:cl_activate_nfs[336] options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap:epprd:epprda:epprds root=epprdap:epprd:epprda:epprds ' +epprd_rg:cl_activate_nfs[343] vers_missing=1 +epprd_rg:cl_activate_nfs[366] (( vers_missing )) +epprd_rg:cl_activate_nfs[366] export_v3=' /board_org' +epprd_rg:cl_activate_nfs[330] getline_exports /sapmnt/EPP +epprd_rg:cl_activate_nfs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_activate_nfs[getline_exports:45] line='' +epprd_rg:cl_activate_nfs[getline_exports:45] typeset line +epprd_rg:cl_activate_nfs[getline_exports:46] flag=0 +epprd_rg:cl_activate_nfs[getline_exports:46] typeset -i flag +epprd_rg:cl_activate_nfs[getline_exports:47] fs=/sapmnt/EPP +epprd_rg:cl_activate_nfs[getline_exports:49] [[ -z /sapmnt/EPP ]] +epprd_rg:cl_activate_nfs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_activate_nfs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[getline_exports:56] read -r line +epprd_rg:cl_activate_nfs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_activate_nfs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:68] grep -q '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_activate_nfs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_activate_nfs[getline_exports:71] flag=1 +epprd_rg:cl_activate_nfs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_activate_nfs[getline_exports:76] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:76] grep -w '\\' +epprd_rg:cl_activate_nfs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_activate_nfs[getline_exports:81] cl_exports_data=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:82] break +epprd_rg:cl_activate_nfs[getline_exports:89] return 0 +epprd_rg:cl_activate_nfs[331] export_line=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[336] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_activate_nfs[336] echo /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_activate_nfs[337] cut -d- -f2- +epprd_rg:cl_activate_nfs[337] tr , ' ' +epprd_rg:cl_activate_nfs[336] options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap root=epprdap ' +epprd_rg:cl_activate_nfs[343] vers_missing=1 +epprd_rg:cl_activate_nfs[366] (( vers_missing )) +epprd_rg:cl_activate_nfs[366] export_v3=' /board_org /sapmnt/EPP' +epprd_rg:cl_activate_nfs[369] EXPORT_FILESYSTEM=' /board_org /sapmnt/EPP' +epprd_rg:cl_activate_nfs[370] EXPORT_FILESYSTEM_V4='' +epprd_rg:cl_activate_nfs[377] [[ -x /usr/sbin/nfsrgyd ]] +epprd_rg:cl_activate_nfs[378] [[ -n '' ]] +epprd_rg:cl_activate_nfs[379] grep -q vers=4 /etc/filesystems +epprd_rg:cl_activate_nfs[394] [[ TRUE == TRUE ]] +epprd_rg:cl_activate_nfs[411] filesystem=/board_org +epprd_rg:cl_activate_nfs[412] mountpoint=/board 
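nfs_mount, entered next, first walks the live mount table so that an already-mounted target becomes a no-op: every line of mount output is read into node/filesystem/mount-point fields and the mount-point column is compared against the target. A condensed sketch of that scan (again relying on ksh running the last pipeline stage in the current shell, so the flag survives the loop):

    # Skip the NFS mount when the mount point is already in the mount
    # table, as nfs_mount concludes for /board below.
    MountPoint=/board
    already=FALSE
    mount | while read node node_fs lcl_mount rest; do
        [[ $lcl_mount == $MountPoint ]] && already=TRUE
    done
    [[ $already == TRUE ]] && print "Filesystem $MountPoint already mounted."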
+epprd_rg:cl_activate_nfs:/board;/board_org[429] PS4_LOOP='/board;/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[430] [[ sequential == sequential ]] +epprd_rg:cl_activate_nfs:/board;/board_org[432] nfs_mount 1 epprd /board_org /board +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:99] (( 4 != 4 )) +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:108] LIMIT=1 +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:108] typeset -li LIMIT +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:109] HOST=epprd +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:110] FileSystem=/board_org +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:111] MountPoint=/board +epprd_rg:cl_activate_nfs(0.129):/board;/board_org[nfs_mount:116] mount +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:119] [[ mounted == /board ]] +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:119] [[ --------------- == /board ]] +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:119] [[ procfs == /board ]] +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:119] [[ ahafs == /board ]] +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:119] [[ /sapcd == /board ]] +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest 
+epprd_rg:cl_activate_nfs(0.132):/board;/board_org[nfs_mount:119] [[ /board == /board ]] +epprd_rg:cl_activate_nfs(0.132):/board;/board_org[nfs_mount:121] cl_echo 11 'cl_activate_nfs: Filesystem /board already mounted.\n' cl_activate_nfs /board Jan 28 2023 19:50:52cl_activate_nfs: Filesystem /board already mounted. +epprd_rg:cl_activate_nfs(0.150):/board;/board_org[nfs_mount:122] return 0 +epprd_rg:process_resources(14.681)[mount_nfs_filesystems:1540] RC=0 +epprd_rg:process_resources(14.681)[mount_nfs_filesystems:1541] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources(14.681)[mount_nfs_filesystems:1549] (( 0 != 0 )) +epprd_rg:process_resources(14.681)[mount_nfs_filesystems:1565] return 0 +epprd_rg:process_resources(14.682)[3324] true +epprd_rg:process_resources(14.682)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(14.682)[3328] set -a +epprd_rg:process_resources(14.682)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:50:52.838763 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(14.695)[3329] eval JOB_TYPE=NONE +epprd_rg:process_resources(14.695)[1] JOB_TYPE=NONE +epprd_rg:process_resources(14.695)[3330] RC=0 +epprd_rg:process_resources(14.695)[3331] set +a +epprd_rg:process_resources(14.695)[3333] (( 0 != 0 )) +epprd_rg:process_resources(14.695)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(14.695)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(14.695)[3343] export GROUPNAME +epprd_rg:process_resources(14.695)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(14.695)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(14.695)[3360] [[ NONE == RELEASE ]] +epprd_rg:process_resources(14.695)[3360] [[ NONE == ONLINE ]] +epprd_rg:process_resources(14.695)[3729] break +epprd_rg:process_resources(14.695)[3740] : If sddsrv was turned off above, turn it back on again +epprd_rg:process_resources(14.695)[3742] [[ FALSE == TRUE ]] +epprd_rg:process_resources(14.695)[3747] exit 0 :rg_move[247] : unsetting AM_SYNC_CALLED_BY from $'callers environment as\n: we dont' require it after this point in execution. :rg_move[250] unset AM_SYNC_CALLED_BY :rg_move[253] [[ -f /tmp/.NFSSTOPPED ]] :rg_move[274] [[ -f /tmp/.RPCLOCKDSTOPPED ]] :rg_move[276] rm -f /tmp/.RPCLOCKDSTOPPED :rg_move[277] ATTEMPT=0 :rg_move[277] typeset -li ATTEMPT :rg_move[278] (( ATTEMPT++ < 60 )) :rg_move[280] : rpc.lockd status check :rg_move[281] lssrc -s rpc.lockd :rg_move[281] LC_ALL=C :rg_move[281] grep stopping :rg_move[282] (( 1 == 0 )) :rg_move[282] break :rg_move[285] startsrc -s rpc.lockd 0513-059 The rpc.lockd Subsystem has been started. Subsystem PID is 24642040. 
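The already-mounted probe at nfs_mount[116-122] above scans the system mount table line by line until the target mount point appears. For local filesystems the node column is absent, so the third field read is the vfs type (jfs2, procfs in the probes above), which can never equal a mount point; only node-qualified remote entries can match. A sketch under those assumptions, not the shipped nfs_mount:

    # Sketch: return 0 if MountPoint already appears in `mount` output.
    function already_mounted {
        typeset MountPoint=$1
        mount | while read node node_fs lcl_mount rest ; do
            # third field is the mounted-over directory for remote entries
            [[ $lcl_mount == "$MountPoint" ]] && return 0
        done
        return 1
    }
    if already_mounted /board ; then
        echo "cl_activate_nfs: Filesystem /board already mounted."
    fi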
:rg_move[286] rcstartsrc=0 :rg_move[287] (( 0 != 0 )) :rg_move[293] exit 0 Jan 28 2023 19:50:52 EVENT COMPLETED: rg_move epprda 1 ACQUIRE 0 |2023-01-28T19:50:52|8602|EVENT COMPLETED: rg_move epprda 1 ACQUIRE 0| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-01-28T19:50:52.956473 :clevlog[amlog_trace:320] echo '|2023-01-28T19:50:52.956473|INFO: rg_move|epprd_rg|epprda|1|ACQUIRE|0' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :rg_move_acquire[+119] exit_status=0 :rg_move_acquire[+120] : exit status of clcallev rg_move epprda 1 ACQUIRE is: 0 :rg_move_acquire[+121] exit 0 Jan 28 2023 19:50:53 EVENT COMPLETED: rg_move_acquire epprda 1 0 |2023-01-28T19:50:53|8602|EVENT COMPLETED: rg_move_acquire epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T19:50:53.083100 + echo '|2023-01-28T19:50:53.083100|INFO: rg_move_acquire|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log Jan 28 2023 19:50:53 EVENT START: rg_move_complete epprda 1 |2023-01-28T19:50:53|8602|EVENT START: rg_move_complete epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-01-28T19:50:53.281301 + echo '|2023-01-28T19:50:53.281301|INFO: rg_move_complete|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. :get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move_complete[91] version=%I% :rg_move_complete[97] STATUS=0 :rg_move_complete[97] typeset -li STATUS :rg_move_complete[99] [[ -z '' ]] :rg_move_complete[101] EMULATE=REAL :rg_move_complete[104] set -u :rg_move_complete[106] (( 2 < 2 || 2 > 3 )) :rg_move_complete[112] NODENAME=epprda :rg_move_complete[112] export NODENAME :rg_move_complete[113] RGID=1 :rg_move_complete[114] (( 2 == 3 )) :rg_move_complete[118] RGDESTINATION='' :rg_move_complete[122] : serial number for this event is 8602 :rg_move_complete[126] : Interpret resource group ID into a resource group name. :rg_move_complete[128] clodmget -qid=1 -f group -n HACMPgroup :rg_move_complete[128] eval RGNAME=epprd_rg :rg_move_complete[1] RGNAME=epprd_rg +epprd_rg:rg_move_complete[129] GROUPNAME=epprd_rg +epprd_rg:rg_move_complete[131] UPDATESTATD=0 +epprd_rg:rg_move_complete[131] typeset -li UPDATESTATD +epprd_rg:rg_move_complete[132] NFSSTOPPED=0 +epprd_rg:rg_move_complete[132] typeset -li NFSSTOPPED +epprd_rg:rg_move_complete[133] LIMIT=60 +epprd_rg:rg_move_complete[133] WAIT=1 +epprd_rg:rg_move_complete[133] TRY=0 +epprd_rg:rg_move_complete[133] typeset -li LIMIT WAIT TRY +epprd_rg:rg_move_complete[136] : If this is a two node cluster and exported filesystems exist, then +epprd_rg:rg_move_complete[137] : when the cluster topology is stable notify rpc.statd of the changes. 
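The comment at rg_move_complete[136-137] summarizes the gating that follows: only on a two-node cluster that actually exports filesystems does rpc.statd need to be told about the topology change. A condensed sketch of that gating, using only the commands visible in this trace (clnodename, clodmget, cl_update_statd); not the shipped script:

    # Sketch of rg_move_complete[139-154]: repoint statd at the twin
    # node only when this is a two-node cluster with NFS exports.
    UPDATESTATD=0
    typeset -i UPDATESTATD
    if (( $(clnodename | wc -l) == 2 )) ; then
        for rg in $(clodmget -f group -n HACMPgroup) ; do
            EXPORTLIST=$(clodmget -q "group=$rg AND name=EXPORT_FILESYSTEM" \
                         -f value -n HACMPresource)
            if [[ -n $EXPORTLIST ]] ; then
                UPDATESTATD=1
                cl_update_statd     # pick a reachable twin and register it
                break
            fi
        done
    fi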
+epprd_rg:rg_move_complete[139] clnodename +epprd_rg:rg_move_complete[139] wc -l +epprd_rg:rg_move_complete[139] (( 2 == 2 )) +epprd_rg:rg_move_complete[141] clodmget -f group -n HACMPgroup +epprd_rg:rg_move_complete[141] RESOURCE_GROUPS=epprd_rg +epprd_rg:rg_move_complete[144] clodmget -q group='epprd_rg AND name=EXPORT_FILESYSTEM' -f value -n HACMPresource +epprd_rg:rg_move_complete[144] EXPORTLIST=$'/board_org\n/sapmnt/EPP' +epprd_rg:rg_move_complete[146] [[ -n $'/board_org\n/sapmnt/EPP' ]] +epprd_rg:rg_move_complete[146] [[ epprd_rg == epprd_rg ]] +epprd_rg:rg_move_complete[148] UPDATESTATD=1 +epprd_rg:rg_move_complete[149] [[ REAL == EMUL ]] +epprd_rg:rg_move_complete[154] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+189] TWIN_NAME=epprds :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+196] rpcinfo -p :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw status :cl_update_statd(0)[+207] : Get the current twin, if there is one :cl_update_statd(0)[+209] :cl_update_statd(0)[+209] nfso -H sm_gethost :cl_update_statd(0)[+209] 2>& 1 CURTWIN=epprds :cl_update_statd(0)[+210] RC=0 :cl_update_statd(0)[+212] [[ -z true ]] :cl_update_statd(0)[+212] [[ -z epprds ]] :cl_update_statd(0)[+225] : Get the interface to the twin node :cl_update_statd(0)[+227] :cl_update_statd(0)[+227] get_node_ip epprds :cl_update_statd(0)[+9] (( 1 != 1 )) :cl_update_statd(0)[+15] Twin_Name=epprds :cl_update_statd(0)[+16] NewTwin= :cl_update_statd(0)[+19] : Get the Interface details for every interface on the twin node :cl_update_statd(0)[+20] : Reject interfaces on nodes that are not public boot addresses :cl_update_statd(0)[+21] : because those are the only ones we have state information for :cl_update_statd(0)[+23] :cl_update_statd(0)[+23] cllsif -J ~ -Sw -i epprda :cl_update_statd(0)[+23] LC_ALL=C LOCAL_NETWORK_INFO=epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+24] cllsif -J ~ -Sw -i epprds :cl_update_statd(0)[+24] LC_ALL=C :cl_update_statd(0)[+25] read adapt type network net_type attrib node ip_addr skip interface skip netmask skip skip prefix ip_family :cl_update_statd(0)[+25] IFS=~ :cl_update_statd(0)[+25] [[ public != public ]] :cl_update_statd(0)[+25] [[ boot != boot ]] :cl_update_statd(0)[+33] : Find the state of this candidate :cl_update_statd(0)[+33] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+37] :cl_update_statd(0)[+37] print 61.81.244.123 :cl_update_statd(0)[+37] tr ./ xx addr=i61x81x244x123_epprds :cl_update_statd(0)[+43] eval candidate_state=${i61x81x244x123_epprds:-down} :cl_update_statd(0)[+43] candidate_state=UP :cl_update_statd(0)[+46] : If state is UP, check to see if this node can talk to it :cl_update_statd(0)[+46] [[ UP == UP ]] :cl_update_statd(0)[+50] ping -w 5 -c 1 -q 61.81.244.123 :cl_update_statd(0)[+50] 1> /dev/null :cl_update_statd(0)[+61] echo 
epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+61] tr \n :cl_update_statd(0)[+62] read lcl_adapt lcl_type lcl_network lcl_net_type lcl_attrib lcl_node lcl_ip_addr skip lcl_interface skip lcl_netmask skip skip lcl_prefix lcl_ip_family :cl_update_statd(0)[+62] IFS=~ :cl_update_statd(0)[+62] [[ net_ether_01 != net_ether_01 ]] :cl_update_statd(0)[+62] [[ boot != boot ]] :cl_update_statd(0)[+62] [[ public != public ]] :cl_update_statd(0)[+62] [[ AF_INET != AF_INET ]] :cl_update_statd(0)[+62] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+71] :cl_update_statd(0)[+71] print 61.81.244.134 :cl_update_statd(0)[+71] tr ./ xx addr=i61x81x244x134_epprda :cl_update_statd(0)[+77] eval lcl_candidate_state=${i61x81x244x134_epprda:-down} :cl_update_statd(0)[+77] lcl_candidate_state=UP :cl_update_statd(0)[+77] [[ UP == UP ]] :cl_update_statd(0)[+81] : epprds is on the same network as an interface that is up :cl_update_statd(0)[+82] : on the local node, and the attributes match. :cl_update_statd(0)[+84] NewTwin=epprds :cl_update_statd(0)[+85] break :cl_update_statd(0)[+85] [[ -n epprds ]] :cl_update_statd(0)[+91] break :cl_update_statd(0)[+91] [[ -z epprds ]] :cl_update_statd(0)[+100] echo epprds :cl_update_statd(0)[+101] return 0 NEWTWIN=epprds :cl_update_statd(0)[+227] [[ -z epprds ]] :cl_update_statd(0)[+227] [[ epprds != epprds ]] :cl_update_statd(0)[+259] : RC is actually 0 :cl_update_statd(0)[+266] return 0 +epprd_rg:rg_move_complete[155] (( 0 != 0 )) +epprd_rg:rg_move_complete[160] break +epprd_rg:rg_move_complete[166] : Set the RESOURCE_GROUPS environment variable with the names +epprd_rg:rg_move_complete[167] : of all resource groups participating in this event, and export +epprd_rg:rg_move_complete[168] : them to all successive scripts. 
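rg_move_complete turns the assignment text printed by clsetenvgrp into exported variables by eval-ing it between set -a and set +a, which is why every group list is available to the scripts that run later in the event. A compressed sketch of that capture-and-eval pattern, with names taken from the trace that follows:

    # Sketch of rg_move_complete[170-175]: clsetenvgrp prints
    # VAR="value" assignments; with allexport (set -a) in effect,
    # eval-ing them exports every group list to successive scripts.
    set -a
    clsetenvgrp_output=$(clsetenvgrp epprda rg_move_complete epprd_rg)
    RC=$?
    eval "$clsetenvgrp_output"   # FORCEDOWN_GROUPS, RESOURCE_GROUPS, SIBLING_*, ...
    set +a
    (( RC != 0 )) && exit 1      # the trace shows (( 0 != 0 )), so processing continues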
+epprd_rg:rg_move_complete[170] set -a +epprd_rg:rg_move_complete[171] clsetenvgrp epprda rg_move_complete epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move_complete epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_complete[171] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_complete[172] RC=0 +epprd_rg:rg_move_complete[173] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_complete[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_complete[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_complete[3] HOMELESS_GROUPS='' +epprd_rg:rg_move_complete[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_complete[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_complete[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_complete[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_complete[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_complete[8] SIBLING_GROUPS='' +epprd_rg:rg_move_complete[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_complete[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_complete[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[174] set +a +epprd_rg:rg_move_complete[175] (( 0 != 0 )) +epprd_rg:rg_move_complete[182] : For each participating resource group, serially process the resources. +epprd_rg:rg_move_complete[251] (( 1 == 1 )) +epprd_rg:rg_move_complete[253] [[ REAL == EMUL ]] +epprd_rg:rg_move_complete[259] stopsrc -s rpc.lockd 0513-044 The rpc.lockd Subsystem was requested to stop. +epprd_rg:rg_move_complete[260] rcstopsrc=0 +epprd_rg:rg_move_complete[261] (( 0 != 0 )) +epprd_rg:rg_move_complete[266] (( TRY=0)) +epprd_rg:rg_move_complete[266] (( 0<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 1<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! 
-z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 2<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 3<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 4<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 5<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 6<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 7<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 8<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 9<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 10<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! 
-z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 11<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 12<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 13<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 14<60)) +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 15<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 16<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 17<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 18<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 19<60)) +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! 
-z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 20<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z '' ]] +epprd_rg:rg_move_complete[273] break +epprd_rg:rg_move_complete[277] [[ ! -z '' ]] +epprd_rg:rg_move_complete[300] : Sure that rpc.lockd stopped. Restart it. +epprd_rg:rg_move_complete[302] startsrc -s rpc.lockd 0513-059 The rpc.lockd Subsystem has been started. Subsystem PID is 24314162. +epprd_rg:rg_move_complete[303] rcstartsrc=0 +epprd_rg:rg_move_complete[304] (( 0 != 0 )) +epprd_rg:rg_move_complete[365] : If the resource group in this rg_move is now homeless, +epprd_rg:rg_move_complete[366] : then we need to put it into an error state. +epprd_rg:rg_move_complete[368] active_node=0 +epprd_rg:rg_move_complete[428] : If the resource group in this rg_move is now homeless_secondary, +epprd_rg:rg_move_complete[429] : then we need to put it into an errorsecondary state. +epprd_rg:rg_move_complete[437] : Set an error state for concurrent groups that have +epprd_rg:rg_move_complete[438] : been brought offline on this node by rg_move. +epprd_rg:rg_move_complete[453] AM_SYNC_CALLED_BY=RG_MOVE_COMPLETE +epprd_rg:rg_move_complete[453] export AM_SYNC_CALLED_BY +epprd_rg:rg_move_complete[454] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-01-28T19:51:13.604589 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=SYNC_VGS ACTION=ACQUIRE VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' :process_resources[1] JOB_TYPE=SYNC_VGS :process_resources[1] ACTION=ACQUIRE :process_resources[1] VOLUME_GROUPS=datavg :process_resources[1] RESOURCE_GROUPS='epprd_rg ' :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 )) :process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ SYNC_VGS == RELEASE ]] +epprd_rg:process_resources[3360] [[ SYNC_VGS == ONLINE ]] +epprd_rg:process_resources[3474] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[3476] sync_volume_groups +epprd_rg:process_resources[sync_volume_groups:2699] PS4_FUNC=sync_volume_groups +epprd_rg:process_resources[sync_volume_groups:2699] typeset PS4_FUNC +epprd_rg:process_resources[sync_volume_groups:2700] [[ high == high ]] +epprd_rg:process_resources[sync_volume_groups:2700] set -x +epprd_rg:process_resources[sync_volume_groups:2701] STAT=0 +epprd_rg:process_resources[sync_volume_groups:2704] export GROUPNAME +epprd_rg:process_resources[sync_volume_groups:2706] get_list_head datavg +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC 
+epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo datavg +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo datavg +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[sync_volume_groups:2706] read LIST_OF_VOLUME_GROUPS_FOR_RG +epprd_rg:process_resources[sync_volume_groups:2707] get_list_tail datavg +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo datavg +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[sync_volume_groups:2707] read VOLUME_GROUPS +epprd_rg:process_resources[sync_volume_groups:2710] : Sync the active volume groups +epprd_rg:process_resources[sync_volume_groups:2712] lsvg -L -o +epprd_rg:process_resources[sync_volume_groups:2712] sort +epprd_rg:process_resources[sync_volume_groups:2712] 2> /tmp/lsvg.err +epprd_rg:process_resources[sync_volume_groups:2712] 1> /tmp/lsvg.out.24642046 +epprd_rg:process_resources[sync_volume_groups:2713] echo datavg +epprd_rg:process_resources[sync_volume_groups:2713] tr ' ' '\n' +epprd_rg:process_resources[sync_volume_groups:2714] sort +epprd_rg:process_resources[sync_volume_groups:2714] comm -12 /tmp/lsvg.out.24642046 - +epprd_rg:process_resources[sync_volume_groups:2716] cl_sync_vgs datavg +epprd_rg:process_resources[sync_volume_groups:2718] [[ -s /tmp/lsvg.err ]] +epprd_rg:process_resources[sync_volume_groups:2723] rm -f /tmp/lsvg.out.24642046 /tmp/lsvg.err +epprd_rg:cl_sync_vgs[303] version=1.24.1.4 +epprd_rg:cl_sync_vgs[306] (( 1 == 0 )) +epprd_rg:cl_sync_vgs[312] : syncing 4 stale PPs at a time seems to be a win most of the time, but +epprd_rg:cl_sync_vgs[313] : we honor the NUM_PARALLEL_LPS value from /etc/environment, as does +epprd_rg:cl_sync_vgs[314] : syncvg. +epprd_rg:cl_sync_vgs[316] syncflag='' +epprd_rg:cl_sync_vgs[316] export syncflag +epprd_rg:cl_sync_vgs[317] PS4_LOOP='' +epprd_rg:cl_sync_vgs[317] export PS4_LOOP +epprd_rg:cl_sync_vgs[318] typeset -i npl +epprd_rg:cl_sync_vgs[319] grep -q ^NUM_PARALLEL_LPS= /etc/environment +epprd_rg:process_resources[sync_volume_groups:2732] unset AM_SYNC_CALLED_BY +epprd_rg:process_resources[sync_volume_groups:2734] return 0 +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:cl_sync_vgs[321] syncflag=-P4 +epprd_rg:cl_sync_vgs[328] echo 'NOTE: While the sync is going on, volume group can be used' NOTE: While the sync is going on, volume group can be used +epprd_rg:cl_sync_vgs[331] : For GLVM volume groups, read PARALLEL LPS count from HACMPresource if it is set from GUI, +epprd_rg:cl_sync_vgs[332] : else, read from environment variables, if it is not set use 32 as default value. 
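A condensed sketch of what cl_sync_vgs does with this, assuming only the behavior visible in the trace: the parallel-LP count defaults to 4 unless NUM_PARALLEL_LPS is set in /etc/environment (syncvg honors the same variable), and check_sync then flags any LV whose lqueryvg status is 2, 3, 5 or 7 as having dirty and/or stale partitions. The syncvg invocation here is a plausible reconstruction, not copied from the script:

    # Sketch only; mirrors cl_sync_vgs[312-321] and check_sync[198-227].
    typeset -i npl=4                          # 4 stale PPs at a time by default
    if grep -q '^NUM_PARALLEL_LPS=' /etc/environment ; then
        npl=$(grep '^NUM_PARALLEL_LPS=' /etc/environment | cut -d= -f2)
    fi
    syncflag="-P$npl"

    vgid=$(getlvodm -v datavg)                # VGID, as at check_sync[89]
    lqueryvg -g "$vgid" -L |
    while read lv_id lv_name lv_status ; do
        # 2, 3, 5 or 7 => dirty and/or stale partitions; anything else is clean
        if (( lv_status == 2 || lv_status == 3 || lv_status == 5 || lv_status == 7 )) ; then
            syncvg $syncflag -l "$lv_name"    # assumed invocation (hedged)
        fi
    done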
+epprd_rg:cl_sync_vgs[334] clodmget -q name='GMVG_REP_RESOURCE and value=datavg' -f group HACMPresource +epprd_rg:cl_sync_vgs[334] 2> /dev/null +epprd_rg:cl_sync_vgs[334] glvm_rg='' +epprd_rg:cl_sync_vgs[335] [[ -n '' ]] +epprd_rg:cl_sync_vgs[353] check_sync datavg +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:76] typeset vg_name +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:77] typeset vgid +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:78] typeset disklist +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:79] typeset lv_name +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:80] typeset -li stale_count +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:81] typeset -li mode +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:82] RC=0 +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:82] typeset -li RC +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:83] typeset site_node_list +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:84] typeset site_choice +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:86] vg_name=datavg +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:87] disklist='' +epprd_rg:cl_sync_vgs(0.029):datavg[check_sync:89] getlvodm -v datavg +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa +epprd_rg:cl_sync_vgs(0.034):datavg[check_sync:89] vgid=00c44af100004b00000001851e9dc053 +epprd_rg:cl_sync_vgs(0.034):datavg[check_sync:92] : find disks in the VG that LVM thinks are inaccessable +epprd_rg:cl_sync_vgs(0.034):datavg[check_sync:94] lsvg -L -p datavg +epprd_rg:cl_sync_vgs(0.035):datavg[check_sync:94] LC_ALL=C 2023-01-28T19:51:13.693144 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=APPLICATIONS ACTION=ACQUIRE ALL_APPLICATIONS='"epprd_app"' RESOURCE_GROUPS='"epprd_rg' '"' MISCDATA='""' +epprd_rg:process_resources[1] JOB_TYPE=APPLICATIONS +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] ALL_APPLICATIONS=epprd_app +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] MISCDATA='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ APPLICATIONS == RELEASE ]] +epprd_rg:process_resources[3360] [[ APPLICATIONS == ONLINE ]] +epprd_rg:process_resources[3549] process_applications ACQUIRE +epprd_rg:process_resources[process_applications:312] PS4_FUNC=process_applications +epprd_rg:process_resources[process_applications:312] typeset PS4_FUNC +epprd_rg:process_resources[process_applications:313] [[ high == high ]] +epprd_rg:process_resources[process_applications:313] set -x +epprd_rg:process_resources[process_applications:316] : Each subprocess will log to a file with this name and PID +epprd_rg:process_resources[process_applications:318] TMP_FILE=/var/hacmp/log/.process_resources_applications.24642046 +epprd_rg:process_resources[process_applications:318] export TMP_FILE +epprd_rg:process_resources[process_applications:320] rm -f '/var/hacmp/log/.process_resources_applications*' +epprd_rg:process_resources[process_applications:322] WAITPIDS='' 
+epprd_rg:process_resources[process_applications:323] LPAR_ACQUIRE_FAILED=0 +epprd_rg:process_resources[process_applications:324] LPAR_RELEASE_FAILED=0 +epprd_rg:process_resources[process_applications:325] START_STOP_FAILED=0 +epprd_rg:process_resources[process_applications:326] LIST_OF_APPS=epprd_app +epprd_rg:process_resources[process_applications:329] : Acquire lpar resources in one-shot before starting applications +epprd_rg:process_resources[process_applications:331] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[process_applications:333] GROUPNAME=epprd_rg +epprd_rg:process_resources[process_applications:333] export GROUPNAME +epprd_rg:process_resources[process_applications:334] clmanageroha -o acquire -s -l epprd_app +epprd_rg:process_resources[process_applications:334] 3>& 2 +epprd_rg:clmanageroha[318] : version='@(#)' 5881272 43haes/usr/sbin/cluster/events/clmanageroha.sh, 61aha_r726, 2205A_aha726, May 16 2022 12:15 PM +epprd_rg:clmanageroha[321] clodmget -n -f connection_type HACMPhmcparam +epprd_rg:clmanageroha[321] CONN_TYPE=0 +epprd_rg:clmanageroha[321] typeset -i CONN_TYPE +epprd_rg:clmanageroha[323] clodmget -q name='epprda and object like POWERVS_*' -nf name HACMPnode +epprd_rg:clmanageroha[323] 2> /dev/null +epprd_rg:clmanageroha[323] [[ -n '' ]] +epprd_rg:clmanageroha[326] export CONN_TYPE +epprd_rg:clmanageroha[331] roha_session_open -o acquire -s -l epprd_app +epprd_rg:clmanageroha[roha_session_open:131] roha_session.id=26542382 +epprd_rg:clmanageroha[roha_session_open:132] date +epprd_rg:clmanageroha[roha_session_open:132] LC_ALL=C +epprd_rg:clmanageroha[roha_session_open:132] roha_session_log 'Open session 26542382 at Sat Jan 28 19:51:13 KORST 2023' [ROHALOG:26542382:(0.071)] Open session 26542382 at Sat Jan 28 19:51:13 KORST 2023 +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:146] roha_session.operation=acquire +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:143] roha_session.systemmirror_mode=1 +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:149] roha_session.optimal_apps=epprd_app +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:163] [[ acquire != @(acquire|release|adjust) ]] +epprd_rg:clmanageroha[roha_session_open:168] no_roha_apps=0 +epprd_rg:clmanageroha[roha_session_open:168] typeset -i no_roha_apps +epprd_rg:clmanageroha[roha_session_open:169] need_explicit_res_rel=0 +epprd_rg:clmanageroha[roha_session_open:169] typeset -i need_explicit_res_rel +epprd_rg:clmanageroha[roha_session_open:187] [[ -n epprd_app ]] +epprd_rg:clmanageroha[roha_session_open:187] clmgr q roha +epprd_rg:clmanageroha[roha_session_open:187] sort +epprd_rg:clmanageroha[roha_session_open:187] uniq -d +epprd_rg:cl_sync_vgs(0.184):datavg[check_sync:94] disklist=$'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.185):datavg[check_sync:95] print -- $'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 
40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.192):datavg[check_sync:95] grep -w missing +epprd_rg:cl_sync_vgs(0.194):datavg[check_sync:95] cut -f1 '-d ' +epprd_rg:cl_sync_vgs(0.199):datavg[check_sync:95] missing_disklist='' +epprd_rg:cl_sync_vgs(0.201):datavg[check_sync:96] print -- $'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.205):datavg[check_sync:96] grep -w removed +epprd_rg:cl_sync_vgs(0.211):datavg[check_sync:96] cut -f1 '-d ' +epprd_rg:cl_sync_vgs(0.219):datavg[check_sync:96] removed_disklist='' +epprd_rg:cl_sync_vgs(0.219):datavg[check_sync:100] : Proceeed if there are some disks that LVM thinks are inaccessable +epprd_rg:cl_sync_vgs(0.220):datavg[check_sync:102] [[ -n '' ]] +epprd_rg:cl_sync_vgs(0.220):datavg[check_sync:102] [[ -n '' ]] +epprd_rg:cl_sync_vgs(0.220):datavg[check_sync:196] : sync if any LVs in the VG that have stale partitions +epprd_rg:cl_sync_vgs(0.220):datavg[check_sync:198] (( 0 == 0 )) +epprd_rg:cl_sync_vgs(0.220):datavg[check_sync:201] : A status of 2,3,5 or 7 indicates the presence of dirty and/or stale partitions +epprd_rg:cl_sync_vgs(0.220):datavg[check_sync:213] is_start_logged=0 +epprd_rg:cl_sync_vgs(0.220):datavg[check_sync:218] at_least_one_sync_success=0 +epprd_rg:cl_sync_vgs(0.220):datavg[check_sync:219] lqueryvg -g 00c44af100004b00000001851e9dc053 -L +epprd_rg:cl_sync_vgs(0.224):datavg[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.316):datavg[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.316):datavg.epprdaloglv[check_sync:221] PS4_LOOP=datavg.epprdaloglv +epprd_rg:cl_sync_vgs(0.316):datavg.epprdaloglv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.316):datavg.epprdaloglv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.316):datavg.epprdaloglv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.316):datavg.epprdaloglv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.316):datavg.epprdaloglv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.316):datavg.saplv[check_sync:221] PS4_LOOP=datavg.saplv +epprd_rg:cl_sync_vgs(0.316):datavg.saplv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.316):datavg.saplv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.316):datavg.saplv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.316):datavg.saplv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.316):datavg.saplv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.316):datavg.sapmntlv[check_sync:221] PS4_LOOP=datavg.sapmntlv +epprd_rg:cl_sync_vgs(0.316):datavg.sapmntlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.316):datavg.sapmntlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.316):datavg.sapmntlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.316):datavg.sapmntlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.316):datavg.sapmntlv[check_sync:221] [[ 
high == high ]] +epprd_rg:cl_sync_vgs(0.317):datavg.oraclelv[check_sync:221] PS4_LOOP=datavg.oraclelv +epprd_rg:cl_sync_vgs(0.317):datavg.oraclelv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.317):datavg.oraclelv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.317):datavg.oraclelv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.317):datavg.oraclelv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.317):datavg.oraclelv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.317):datavg.epplv[check_sync:221] PS4_LOOP=datavg.epplv +epprd_rg:cl_sync_vgs(0.317):datavg.epplv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.317):datavg.epplv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.317):datavg.epplv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.317):datavg.epplv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.317):datavg.epplv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.317):datavg.oraarchlv[check_sync:221] PS4_LOOP=datavg.oraarchlv +epprd_rg:cl_sync_vgs(0.317):datavg.oraarchlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.317):datavg.oraarchlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.317):datavg.oraarchlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.317):datavg.oraarchlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.317):datavg.oraarchlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata1lv[check_sync:221] PS4_LOOP=datavg.sapdata1lv +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata1lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata1lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata1lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata1lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata1lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata2lv[check_sync:221] PS4_LOOP=datavg.sapdata2lv +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata2lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata2lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata2lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata2lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata2lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata3lv[check_sync:221] PS4_LOOP=datavg.sapdata3lv +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata3lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata3lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata3lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata3lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata3lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata4lv[check_sync:221] PS4_LOOP=datavg.sapdata4lv +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata4lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata4lv[check_sync:225] : Anything else indicates no 
stale partitions +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata4lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata4lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.317):datavg.sapdata4lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.317):datavg.boardlv[check_sync:221] PS4_LOOP=datavg.boardlv +epprd_rg:cl_sync_vgs(0.317):datavg.boardlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.317):datavg.boardlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.317):datavg.boardlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.317):datavg.boardlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.317):datavg.boardlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.317):datavg.origlogAlv[check_sync:221] PS4_LOOP=datavg.origlogAlv +epprd_rg:cl_sync_vgs(0.317):datavg.origlogAlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.317):datavg.origlogAlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.317):datavg.origlogAlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.317):datavg.origlogAlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.317):datavg.origlogAlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.317):datavg.origlogBlv[check_sync:221] PS4_LOOP=datavg.origlogBlv +epprd_rg:cl_sync_vgs(0.317):datavg.origlogBlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.317):datavg.origlogBlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.317):datavg.origlogBlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.317):datavg.origlogBlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.317):datavg.origlogBlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.317):datavg.mirrlogAlv[check_sync:221] PS4_LOOP=datavg.mirrlogAlv +epprd_rg:cl_sync_vgs(0.317):datavg.mirrlogAlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.317):datavg.mirrlogAlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.317):datavg.mirrlogAlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.317):datavg.mirrlogAlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.317):datavg.mirrlogAlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.317):datavg.mirrlogBlv[check_sync:221] PS4_LOOP=datavg.mirrlogBlv +epprd_rg:cl_sync_vgs(0.317):datavg.mirrlogBlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.317):datavg.mirrlogBlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.317):datavg.mirrlogBlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.317):datavg.mirrlogBlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.317):datavg.mirrlogBlv[check_sync:268] [[ -n RG_MOVE_COMPLETE ]] +epprd_rg:cl_sync_vgs(0.317):datavg.mirrlogBlv[check_sync:268] (( 0 == 1 )) +epprd_rg:cl_sync_vgs[355] exit 0 +epprd_rg:clmanageroha[roha_session_open:187] echo epprd_app +epprd_rg:clmanageroha[roha_session_open:187] sort -u +epprd_rg:clmanageroha[roha_session_open:187] echo '\nepprd_app' +epprd_rg:clmanageroha[roha_session_open:187] [[ -z '' ]] +epprd_rg:clmanageroha[roha_session_open:189] roha_session_log 'INFO: No ROHA configured on applications.\n' [ROHALOG:26542382:(0.570)] INFO: No ROHA configured on applications. 
[ROHALOG:26542382:(0.570)] +epprd_rg:clmanageroha[roha_session_open:190] no_roha_apps=1 +epprd_rg:clmanageroha[roha_session_open:195] read_tunables +epprd_rg:clmanageroha[roha_session_open:196] echo '' +epprd_rg:clmanageroha[roha_session_open:196] grep -q epprda +epprd_rg:clmanageroha[roha_session_open:197] (( 1 == 0 )) +epprd_rg:clmanageroha[roha_session_open:202] (( 1 == 1 )) +epprd_rg:clmanageroha[roha_session_open:203] roha_session_read_odm_dynresop DLPAR_MEM +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_MEM -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:203] (( 0 == 0.00 )) +epprd_rg:clmanageroha[roha_session_open:204] roha_session_read_odm_dynresop DLPAR_PROCS +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROCS -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:204] (( 0 == 0 )) +epprd_rg:clmanageroha[roha_session_open:205] roha_session_read_odm_dynresop DLPAR_PROC_UNITS +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROC_UNITS -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:205] (( 0 == 0.00 )) +epprd_rg:clmanageroha[roha_session_open:206] roha_session_log 'INFO: Nothing to be done.\n' [ROHALOG:26542382:(0.645)] INFO: Nothing to be done. 
[ROHALOG:26542382:(0.645)] +epprd_rg:clmanageroha[roha_session_open:207] exit 0 +epprd_rg:process_resources[process_applications:335] RC=0 +epprd_rg:process_resources[process_applications:336] (( 0 != 0 )) +epprd_rg:process_resources[process_applications:343] (( LPAR_ACQUIRE_FAILED == 0 )) +epprd_rg:process_resources[process_applications:345] : Loop through all groups to start or stop applications +epprd_rg:process_resources[process_applications:348] export GROUPNAME +epprd_rg:process_resources[process_applications:351] : Break out application data +epprd_rg:process_resources[process_applications:353] get_list_head epprd_app +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo epprd_app +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo epprd_app +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[process_applications:353] read LIST_OF_APPLICATIONS_FOR_RG +epprd_rg:process_resources[process_applications:354] get_list_tail epprd_app +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo epprd_app +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[process_applications:354] read ALL_APPLICATIONS +epprd_rg:process_resources[process_applications:356] get_list_head +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[process_applications:356] read MISCDATA_FOR_RG +epprd_rg:process_resources[process_applications:357] get_list_tail +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[process_applications:357] read MISCDATA +epprd_rg:process_resources[process_applications:359] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[process_applications:374] APPLICATIONS=epprd_app +epprd_rg:process_resources[process_applications:374] export APPLICATIONS +epprd_rg:process_resources[process_applications:375] MISC_DATA='' +epprd_rg:process_resources[process_applications:375] export MISC_DATA 
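get_list_head and get_list_tail, traced repeatedly above, are the list plumbing for everything clRGPA hands back: per-group values arrive as colon-separated lists whose items are comma-separated, head/tail split at the first colon, and the head's commas become spaces. A reconstruction from the traced function bodies (process_resources lines 59-70), relying on ksh93 running the last pipeline stage in the current shell so read can set variables:

    # Reconstructed from the trace, not copied from the source.
    function get_list_head {
        echo "$*" | IFS=: read listhead listtail   # split at first colon
        echo "$listhead" | tr , ' '                # commas become spaces
    }
    function get_list_tail {
        echo "$*" | IFS=: read listhead listtail
        echo "$listtail"
    }
    get_list_head datavg:vg2,vg3 | read HEAD   # HEAD='datavg'
    get_list_tail datavg:vg2,vg3 | read TAIL   # TAIL='vg2,vg3'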
+epprd_rg:process_resources[process_applications:378] : Now call start_or_stop_applications_for_rg to do the app start/stop. +epprd_rg:process_resources[process_applications:381] start_or_stop_applications_for_rg ACQUIRE /var/hacmp/log/.process_resources_applications.24642046.epprd_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:248] PS4_FUNC=start_or_stop_applications_for_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:248] typeset PS4_FUNC +epprd_rg:process_resources[start_or_stop_applications_for_rg:249] [[ high == high ]] +epprd_rg:process_resources[start_or_stop_applications_for_rg:249] set -x +epprd_rg:process_resources[start_or_stop_applications_for_rg:251] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[start_or_stop_applications_for_rg:253] cmd_to_execute=start_server +epprd_rg:process_resources[start_or_stop_applications_for_rg:259] : File name to store our exit status +epprd_rg:process_resources[start_or_stop_applications_for_rg:261] STATUS_FILE=/var/hacmp/log/.process_resources_applications.24642046.epprd_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:264] : Use clcallev to run the event +epprd_rg:process_resources[start_or_stop_applications_for_rg:266] clcallev start_server epprd_app +epprd_rg:process_resources[process_applications:384] : Add PID of the last bg start_or_stop_applications_for_rg process to WAITPIDS. +epprd_rg:process_resources[process_applications:386] WAITPIDS=' 24445364' +epprd_rg:process_resources[process_applications:390] : Wait for the start_or_stop_applications_for_rg PIDs to finish. +epprd_rg:process_resources[process_applications:393] wait 24445364 Jan 28 2023 19:51:14 EVENT START: start_server epprd_app |2023-01-28T19:51:14|8602|EVENT START: start_server epprd_app| +epprd_rg:start_server[+206] version=%I% +epprd_rg:start_server[+210] export TMP_FILE=/var/hacmp/log/.start_server.26739168 +epprd_rg:start_server[+211] export DCD=/etc/es/objrepos +epprd_rg:start_server[+212] export ACD=/usr/es/sbin/cluster/etc/objrepos/active +epprd_rg:start_server[+214] rm -f /var/hacmp/log/.start_server.26739168 +epprd_rg:start_server[+216] STATUS=0 +epprd_rg:start_server[+220] PROC_RES=false +epprd_rg:start_server[+224] [[ APPLICATIONS != 0 ]] +epprd_rg:start_server[+224] [[ APPLICATIONS != GROUP ]] +epprd_rg:start_server[+225] PROC_RES=true +epprd_rg:start_server[+228] set -u +epprd_rg:start_server[+229] typeset WPARNAME EXEC WPARDIR +epprd_rg:start_server[+230] export WPARNAME EXEC WPARDIR +epprd_rg:start_server[+232] EXEC= +epprd_rg:start_server[+233] WPARNAME= +epprd_rg:start_server[+234] WPARDIR= +epprd_rg:start_server[+237] ALLSERVERS=All_servers +epprd_rg:start_server[+238] ALLNOERRSERV=All_nonerror_servers +epprd_rg:start_server[+239] cl_RMupdate resource_acquiring All_servers start_server 2023-01-28T19:51:14.487834 2023-01-28T19:51:14.492229 +epprd_rg:start_server[+241] +epprd_rg:start_server[+241] clwparname epprd_rg +epprd_rg:clwparname[38] version=1.3.1.1 +epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clwparname[44] [[ -z '' ]] +epprd_rg:clwparname[44] exit 0 WPARNAME= +epprd_rg:start_server[+243] (( 0 == 0 )) +epprd_rg:start_server[+243] [[ -n ]] +epprd_rg:start_server[+258] start_and_monitor_server epprd_app +epprd_rg:start_server[start_and_monitor_server+5] RETURN_STATUS=0 +epprd_rg:start_server[start_and_monitor_server+7] server=epprd_app +epprd_rg:start_server[start_and_monitor_server+12] echo Checking whether epprd_app is already running...\n Checking 
whether epprd_app is already running... +epprd_rg:start_server[start_and_monitor_server+12] [[ -n ]] +epprd_rg:start_server[start_and_monitor_server+18] cl_app_startup_monitor -s epprd_app -a +epprd_rg:start_server[+261] wait +epprd_rg:start_server[start_and_monitor_server+21] RETURN_STATUS=1 +epprd_rg:start_server[start_and_monitor_server+22] : exit status of cl_app_startup_monitor is: 1 +epprd_rg:start_server[start_and_monitor_server+22] [[ 1 == 0 ]] +epprd_rg:start_server[start_and_monitor_server+33] echo Application monitor(s) indicate that epprd_app is not active. Continuing with application startup.\n Application monitor(s) indicate that epprd_app is not active. Continuing with application startup. +epprd_rg:start_server[start_and_monitor_server+42] +epprd_rg:start_server[start_and_monitor_server+42] cllsserv -cn epprd_app +epprd_rg:start_server[start_and_monitor_server+42] cut -d: -f2 START=/etc/hacmp/epprd_start.sh +epprd_rg:start_server[start_and_monitor_server+43] +epprd_rg:start_server[start_and_monitor_server+43] echo /etc/hacmp/epprd_start.sh +epprd_rg:start_server[start_and_monitor_server+43] cut -d -f1 START_SCRIPT=/etc/hacmp/epprd_start.sh +epprd_rg:start_server[start_and_monitor_server+44] +epprd_rg:start_server[start_and_monitor_server+44] cllsserv -cn epprd_app +epprd_rg:start_server[start_and_monitor_server+44] cut -d: -f4 START_MODE=background +epprd_rg:start_server[start_and_monitor_server+44] [[ -z background ]] +epprd_rg:start_server[start_and_monitor_server+47] PATTERN=epprda epprd_app +epprd_rg:start_server[start_and_monitor_server+48] RETURN_STATUS=0 +epprd_rg:start_server[start_and_monitor_server+51] amlog_trace Starting application controller in background|epprd_app +epprd_rg:start_server[start_and_monitor_server+200] clcycle clavailability.log +epprd_rg:start_server[start_and_monitor_server+200] 1> /dev/null 2>& 1 +epprd_rg:start_server[start_and_monitor_server+200] +epprd_rg:start_server[start_and_monitor_server+200] cltime DATE=2023-01-28T19:51:14.544674 +epprd_rg:start_server[start_and_monitor_server+200] echo |2023-01-28T19:51:14.544674|INFO: Starting application controller in background|epprd_app +epprd_rg:start_server[start_and_monitor_server+200] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:start_server[start_and_monitor_server+51] [[ -n ]] +epprd_rg:start_server[start_and_monitor_server+51] [[ -z ]] +epprd_rg:start_server[start_and_monitor_server+51] [[ -x /etc/hacmp/epprd_start.sh ]] +epprd_rg:start_server[start_and_monitor_server+60] [ background == background ] +epprd_rg:start_server[start_and_monitor_server+62] date +epprd_rg:start_server[start_and_monitor_server+62] LC_ALL=C +epprd_rg:start_server[start_and_monitor_server+62] echo Running application controller start script for epprd_app in the background at Sat Jan 28 19:51:14 KORST 2023.\n Running application controller start script for epprd_app in the background at Sat Jan 28 19:51:14 KORST 2023. 
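Every amlog_trace call in this log expands to the same three steps: rotate the availability log with clcycle, fetch a timestamp with cltime, and append one pipe-delimited record. A sketch of the equivalent function, with date standing in for PowerHA's cltime (which also prints microseconds) and the rotation step omitted since clcycle has no portable counterpart:

    # append '|<timestamp>|INFO: <msg>' to the availability log
    amlog_trace () {
        typeset msg="$1"
        typeset now
        now=$(date '+%Y-%m-%dT%H:%M:%S')
        echo "|$now|INFO: $msg" >> /var/hacmp/availability/clavailability.log
    }

    amlog_trace 'Starting application controller in background|epprd_app'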
+epprd_rg:start_server[start_and_monitor_server+63] /etc/hacmp/epprd_start.sh
+epprd_rg:start_server[start_and_monitor_server+63] ODMDIR=/etc/es/objrepos
+epprd_rg:start_server[start_and_monitor_server+62] [[ 0 != 0 ]]
+epprd_rg:start_server[start_and_monitor_server+62] [[ -n ]]
+epprd_rg:start_server[start_and_monitor_server+94] cl_app_startup_monitor -s epprd_app
[ASCII-art banner from the background start script, interleaved with the trace, omitted]
+epprd_rg:start_server[start_and_monitor_server+97] RETURN_STATUS=0
+epprd_rg:start_server[start_and_monitor_server+98] : exit status of cl_app_startup_monitor is: 0
+epprd_rg:start_server[start_and_monitor_server+98] [[ 0 != 0 ]]
+epprd_rg:start_server[start_and_monitor_server+109] echo epprd_app 0
[ASCII-art banner from the background start script, interleaved with the trace, omitted]
+epprd_rg:start_server[start_and_monitor_server+109] 1> /var/hacmp/log/.start_server.26739168.epprd_app
+epprd_rg:start_server[start_and_monitor_server+112]
+epprd_rg:start_server[start_and_monitor_server+112] cllsserv -cn epprd_app
+epprd_rg:start_server[start_and_monitor_server+112] cut -d: -f4
START_MODE=background
+epprd_rg:start_server[start_and_monitor_server+112] [[ background == foreground ]]
+epprd_rg:start_server[start_and_monitor_server+132] return 0
+epprd_rg:/usr/es/sbin/cluster/utilities/cllsgrp[51] [[ high == high ]]
+epprd_rg:/usr/es/sbin/cluster/utilities/cllsgrp[51] version=1.11
+epprd_rg:/usr/es/sbin/cluster/utilities/cllsgrp[52] [ 0 -gt 1 ]
+epprd_rg:/usr/es/sbin/cluster/utilities/cllsgrp[59] [[ '' == -p ]]
+epprd_rg:/usr/es/sbin/cluster/utilities/cllsgrp[65] [[ '' == -n ]]
+epprd_rg:/usr/es/sbin/cluster/utilities/cllsgrp[78] clgetgrp -f group
+epprd_rg:/usr/es/sbin/cluster/utilities/cllsgrp[78] 2> /dev/null
+epprd_rg:start_server[+266]
+epprd_rg:start_server[+266] cllsserv -cn epprd_app
+epprd_rg:/usr/es/sbin/cluster/utilities/cllsgrp[78] sort
+epprd_rg:start_server[+266] cut -d: -f4
START_MODE=background
+epprd_rg:start_server[+267] [ background == background ]
+epprd_rg:start_server[+269]
+epprd_rg:start_server[+269] cat /var/hacmp/log/.start_server.26739168.epprd_app
+epprd_rg:start_server[+269] cut -f2 -d
SUCCESS=0
+epprd_rg:start_server[+269] [[ 0 != 0 ]]
+epprd_rg:start_server[+274] amlog_trace Starting application controller in background|epprd_app
+epprd_rg:start_server[+200] clcycle clavailability.log
+epprd_rg:start_server[+200] 1> /dev/null 2>& 1
cllsres: Resource Group not configured or not found.
+epprd_rg:start_server[+200] +epprd_rg:start_server[+200] cltime DATE=2023-01-28T19:51:14.628772 +epprd_rg:start_server[+200] echo |2023-01-28T19:51:14.628772|INFO: Starting application controller in background|epprd_app +epprd_rg:start_server[+200] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:start_server[+276] +epprd_rg:start_server[+276] clodmget -q name = epprd_app -n -f cpu_usage_monitor HACMPserver MACTIVE=no +epprd_rg:start_server[+276] [[ no == yes ]] +epprd_rg:start_server[+292] +epprd_rg:start_server[+292] cat /var/hacmp/log/.start_server.26739168.epprd_app +epprd_rg:start_server[+292] cut -f2 -d SUCCESS=0 +epprd_rg:start_server[+292] [[ 0 != +([0-9]) ]] +epprd_rg:start_server[+297] (( 0 != 0 )) +epprd_rg:start_server[+303] [[ 0 == 0 ]] +epprd_rg:start_server[+306] rm -f /var/hacmp/log/.start_server.26739168.epprd_app +epprd_rg:start_server[+308] cl_RMupdate resource_up All_nonerror_servers start_server 2023-01-28T19:51:14.667419 2023-01-28T19:51:14.671934 +epprd_rg:start_server[+314] exit 0 Jan 28 2023 19:51:14 EVENT COMPLETED: start_server epprd_app 0 |2023-01-28T19:51:14|8602|EVENT COMPLETED: start_server epprd_app 0| +epprd_rg:process_resources[start_or_stop_applications_for_rg:267] RC=0 +epprd_rg:process_resources[start_or_stop_applications_for_rg:269] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[start_or_stop_applications_for_rg:279] (( 0 != 0 )) +epprd_rg:process_resources[start_or_stop_applications_for_rg:291] : Store the result for later accumulation +epprd_rg:process_resources[start_or_stop_applications_for_rg:293] print 'epprd_rg 0' +epprd_rg:process_resources[start_or_stop_applications_for_rg:293] 1>> /var/hacmp/log/.process_resources_applications.24642046.epprd_rg +epprd_rg:process_resources[process_applications:396] : Look at all the status files to see if any were unsuccessful +epprd_rg:process_resources[process_applications:399] cat /var/hacmp/log/.process_resources_applications.24642046.epprd_rg +epprd_rg:process_resources[process_applications:399] read skip SUCCESS rest +epprd_rg:process_resources[process_applications:401] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:411] rm -f /var/hacmp/log/.process_resources_applications.24642046.epprd_rg +epprd_rg:process_resources[process_applications:416] : Release lpar resources in one-shot now that applications are stopped +epprd_rg:process_resources[process_applications:418] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[process_applications:433] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:434] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:435] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:439] return 0 +epprd_rg:process_resources[3550] RC=0 +epprd_rg:process_resources[3551] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:51:14.821987 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=ONLINE RESOURCE_GROUPS='"epprd_rg"' +epprd_rg:process_resources[1] JOB_TYPE=ONLINE +epprd_rg:process_resources[1] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] 
set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ ONLINE == RELEASE ]] +epprd_rg:process_resources[3360] [[ ONLINE == ONLINE ]] +epprd_rg:process_resources[3363] INFO_STRING='' +epprd_rg:process_resources[3364] clnodename +epprd_rg:process_resources[3373] ENV_VAR=GROUP_epprd_rg_epprda +epprd_rg:process_resources[3374] eval 'echo $GROUP_epprd_rg_epprda' +epprd_rg:process_resources[1] echo WILLBEUPPOSTEVENT +epprd_rg:process_resources[3374] read ENV_VAR +epprd_rg:process_resources[3375] [[ WILLBEUPPOSTEVENT == WILLBEUPPOSTEVENT ]] +epprd_rg:process_resources[3376] INFO_STRING='|DESTINATION=epprda' +epprd_rg:process_resources[3377] IS_SERVICE_STOP=0 +epprd_rg:process_resources[3379] [[ WILLBEUPPOSTEVENT == ISUPPREEVENT ]] +epprd_rg:process_resources[3373] ENV_VAR=GROUP_epprd_rg_epprds +epprd_rg:process_resources[3374] eval 'echo $GROUP_epprd_rg_epprds' +epprd_rg:process_resources[1] echo ISUPPREEVENT +epprd_rg:process_resources[3374] read ENV_VAR +epprd_rg:process_resources[3375] [[ ISUPPREEVENT == WILLBEUPPOSTEVENT ]] +epprd_rg:process_resources[3379] [[ ISUPPREEVENT == ISUPPREEVENT ]] +epprd_rg:process_resources[3380] INFO_STRING='|DESTINATION=epprda|SOURCE=epprds' +epprd_rg:process_resources[3381] IS_SERVICE_START=0 +epprd_rg:process_resources[3384] (( 0 == 0 && 0 ==0 )) +epprd_rg:process_resources[3385] eval 'echo $ISUPPREEVENT' +epprd_rg:process_resources[1] echo +epprd_rg:process_resources[3385] read ENV_VAR +epprd_rg:process_resources[3386] INFO_STRING='RG_FAILOVER|epprd_rg|DESTINATION=epprda|SOURCE=epprds|8602' +epprd_rg:process_resources[3387] [[ ONLINE == RELEASE ]] +epprd_rg:process_resources[3390] amlog_trace '' 'RG_FAILOVER|epprd_rg|DESTINATION=epprda|SOURCE=epprds|8602' +epprd_rg:process_resources[amlog_trace:318] clcycle clavailability.log +epprd_rg:process_resources[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:process_resources[amlog_trace:319] cltime +epprd_rg:process_resources[amlog_trace:319] DATE=2023-01-28T19:51:14.891286 +epprd_rg:process_resources[amlog_trace:320] echo '|2023-01-28T19:51:14.891286|INFO: RG_FAILOVER|epprd_rg|DESTINATION=epprda|SOURCE=epprds|8602' +epprd_rg:process_resources[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:process_resources[3673] set_resource_group_state UP +epprd_rg:process_resources[set_resource_group_state:82] PS4_FUNC=set_resource_group_state +epprd_rg:process_resources[set_resource_group_state:82] typeset PS4_FUNC +epprd_rg:process_resources[set_resource_group_state:83] [[ high == high ]] +epprd_rg:process_resources[set_resource_group_state:83] set -x +epprd_rg:process_resources[set_resource_group_state:84] STAT=0 +epprd_rg:process_resources[set_resource_group_state:85] new_status=UP +epprd_rg:process_resources[set_resource_group_state:89] export GROUPNAME +epprd_rg:process_resources[set_resource_group_state:90] [[ UP != DOWN ]] +epprd_rg:process_resources[set_resource_group_state:92] clchdaemons -d clstrmgr_scripts -t resource_locator -n epprda -o epprd_rg -v UP +epprd_rg:process_resources[set_resource_group_state:100] : Resource Manager Updates +epprd_rg:process_resources[set_resource_group_state:116] cl_RMupdate rg_up epprd_rg process_resources 2023-01-28T19:51:14.937443 2023-01-28T19:51:14.947021 
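The GROUP_epprd_rg_epprda and GROUP_epprd_rg_epprds probes above are indirect variable lookups: a name is assembled from the group and node, then dereferenced by piping an eval'd echo into read. A sketch of the idiom under ksh93, with the sample value taken from the trace (in the real event the cluster manager sets the variable):

    GROUP_epprd_rg_epprda=WILLBEUPPOSTEVENT    # set by the cluster manager in the real event

    rg=epprd_rg node=epprda
    ENV_VAR="GROUP_${rg}_${node}"
    eval 'echo $'"$ENV_VAR" | read ENV_VAR     # dereference; read sets ENV_VAR under ksh93
    if [[ $ENV_VAR == WILLBEUPPOSTEVENT ]]; then
        echo "$rg will be online on $node after this event"
    fi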
+epprd_rg:process_resources[set_resource_group_state:118] amlog_trace '' 'acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:318] clcycle clavailability.log +epprd_rg:process_resources[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:process_resources[amlog_trace:319] cltime +epprd_rg:process_resources[amlog_trace:319] DATE=2023-01-28T19:51:14.986761 +epprd_rg:process_resources[amlog_trace:320] echo '|2023-01-28T19:51:14.986761|INFO: acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:process_resources[set_resource_group_state:153] return 0 +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-01-28T19:51:14.999123 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=NONE +epprd_rg:process_resources[1] JOB_TYPE=NONE +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ NONE == RELEASE ]] +epprd_rg:process_resources[3360] [[ NONE == ONLINE ]] +epprd_rg:process_resources[3729] break +epprd_rg:process_resources[3740] : If sddsrv was turned off above, turn it back on again +epprd_rg:process_resources[3742] [[ FALSE == TRUE ]] +epprd_rg:process_resources[3747] exit 0 +epprd_rg:rg_move_complete[455] STATUS=0 +epprd_rg:rg_move_complete[456] : The exit status of process_resources is: 0 +epprd_rg:rg_move_complete[461] unset AM_SYNC_CALLED_BY +epprd_rg:rg_move_complete[462] [[ TRUE == TRUE ]] +epprd_rg:rg_move_complete[491] [[ -z '' ]] +epprd_rg:rg_move_complete[493] RESOURCE_GROUPS=epprd_rg +epprd_rg:rg_move_complete[499] GROUPNAME=epprd_rg +epprd_rg:rg_move_complete[499] export GROUPNAME +epprd_rg:rg_move_complete[501] cl_rrmethods2call postrg_move +epprd_rg:cl_rrmethods2call[56] version=%I% +epprd_rg:cl_rrmethods2call[84] RRMETHODS='' +epprd_rg:cl_rrmethods2call[85] NEED_RR_ENV_VARS=no +epprd_rg:cl_rrmethods2call[124] NEED_RR_ENV_VARS=yes +epprd_rg:cl_rrmethods2call[129] : Set the '*_REP_RESOURCE' variables if needed. 
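The recurring sequence true, clRGPA, set -a, eval, set +a is the driver loop of process_resources: each clRGPA call emits one job as NAME=value assignments, set -a exports whatever the eval defines to the child scripts, and JOB_TYPE=NONE ends the loop. A sketch of that driver, with an inline two-pass stand-in where the real code evals the output of clRGPA:

    typeset -i pass=0
    while true; do
        set -a                                   # export everything the eval assigns
        if (( pass == 0 )); then                 # stand-in for: eval $(clRGPA)
            eval 'JOB_TYPE=ONLINE RESOURCE_GROUPS="epprd_rg"'
        else
            eval 'JOB_TYPE=NONE'
        fi
        set +a
        (( pass++ ))
        [[ $JOB_TYPE == NONE ]] && break         # NONE means no more work
        echo "dispatching $JOB_TYPE for $RESOURCE_GROUPS"
    done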
+epprd_rg:cl_rrmethods2call[131] [[ yes == yes ]] +epprd_rg:cl_rrmethods2call[133] cllsres +epprd_rg:cl_rrmethods2call[133] 2> /dev/null +epprd_rg:cl_rrmethods2call[133] eval APPLICATIONS='"epprd_app"' EXPORT_FILESYSTEM='"/board_org' '/sapmnt/EPP"' FILESYSTEM='""' FORCED_VARYON='"false"' FSCHECK_TOOL='"fsck"' FS_BEFORE_IPADDR='"false"' MOUNT_FILESYSTEM='"/board;/board_org"' RECOVERY_METHOD='"sequential"' SERVICE_LABEL='"epprd"' SSA_DISK_FENCING='"false"' VG_AUTO_IMPORT='"false"' VOLUME_GROUP='"datavg"' USERDEFINED_RESOURCES='""' +epprd_rg:cl_rrmethods2call[1] APPLICATIONS=epprd_app +epprd_rg:cl_rrmethods2call[1] EXPORT_FILESYSTEM='/board_org /sapmnt/EPP' +epprd_rg:cl_rrmethods2call[1] FILESYSTEM='' +epprd_rg:cl_rrmethods2call[1] FORCED_VARYON=false +epprd_rg:cl_rrmethods2call[1] FSCHECK_TOOL=fsck +epprd_rg:cl_rrmethods2call[1] FS_BEFORE_IPADDR=false +epprd_rg:cl_rrmethods2call[1] MOUNT_FILESYSTEM='/board;/board_org' +epprd_rg:cl_rrmethods2call[1] RECOVERY_METHOD=sequential +epprd_rg:cl_rrmethods2call[1] SERVICE_LABEL=epprd +epprd_rg:cl_rrmethods2call[1] SSA_DISK_FENCING=false +epprd_rg:cl_rrmethods2call[1] VG_AUTO_IMPORT=false +epprd_rg:cl_rrmethods2call[1] VOLUME_GROUP=datavg +epprd_rg:cl_rrmethods2call[1] USERDEFINED_RESOURCES='' +epprd_rg:cl_rrmethods2call[137] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[142] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[147] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[152] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[157] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[162] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[167] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[172] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[182] [[ -z '' ]] +epprd_rg:cl_rrmethods2call[184] typeset sysmgdata +epprd_rg:cl_rrmethods2call[185] typeset reposmgdata +epprd_rg:cl_rrmethods2call[186] [[ -x /usr/es/sbin/cluster/xd_generic/xd_cli/clxd_list_mg_smit ]] +epprd_rg:cl_rrmethods2call[191] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[191] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[197] echo '' +epprd_rg:cl_rrmethods2call[199] return 0 +epprd_rg:rg_move_complete[501] METHODS='' +epprd_rg:rg_move_complete[516] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. 
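cl_rrmethods2call pulls the whole resource definition into the environment by eval-ing the output of cllsres, a single line of quoted NAME='value' assignments, and then tests the imported variables to decide which replicated-resource methods apply. The same import pattern, sketched with a stand-in for cllsres:

    # Stand-in for cllsres, which prints one line of quoted NAME='value' assignments
    list_res () {
        print "APPLICATIONS='epprd_app' VOLUME_GROUP='datavg' SERVICE_LABEL='epprd'"
    }

    eval $(list_res)       # each assignment becomes a shell variable
    echo "apps=$APPLICATIONS vg=$VOLUME_GROUP service=$SERVICE_LABEL"

Since eval executes whatever the command printed, the real code discards cllsres error output (2> /dev/null) before evaluating it; the stray "cllsres: Resource Group not configured or not found." message earlier in this log appears to be that stderr surfacing from a different, unredirected call.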
+epprd_rg:rg_move_complete[518] exit 0
Jan 28 2023 19:51:15 EVENT COMPLETED: rg_move_complete epprda 1 0
|2023-01-28T19:51:15|8602|EVENT COMPLETED: rg_move_complete epprda 1 0|
+ clcycle clavailability.log
+ 1> /dev/null 2>& 1
+ cltime
+ DATE=2023-01-28T19:51:15.116688
+ echo '|2023-01-28T19:51:15.116688|INFO: rg_move_complete|epprd_rg|epprda|1|0'
+ 1>> /var/hacmp/availability/clavailability.log

PowerHA SystemMirror Event Summary
----------------------------------------------------------------------------
Serial number for this event: 8602
Event: TE_RG_MOVE_ACQUIRE
Start time: Sat Jan 28 19:50:37 2023
End time: Sat Jan 28 19:51:15 2023
Action:                    Resource:                    Script Name:
----------------------------------------------------------------------------
Acquiring resource group: epprd_rg                      process_resources
Search on: Sat.Jan.28.19:50:38.KORST.2023.process_resources.epprd_rg.ref
Acquiring resource: All_service_addrs                   acquire_service_addr
Search on: Sat.Jan.28.19:50:38.KORST.2023.acquire_service_addr.All_service_addrs.epprd_rg.ref
Resource online: All_nonerror_service_addrs             acquire_service_addr
Search on: Sat.Jan.28.19:50:38.KORST.2023.acquire_service_addr.All_nonerror_service_addrs.epprd_rg.ref
Acquiring resource: All_volume_groups                   cl_activate_vgs
Search on: Sat.Jan.28.19:50:39.KORST.2023.cl_activate_vgs.All_volume_groups.epprd_rg.ref
Resource online: All_nonerror_volume_groups             cl_activate_vgs
Search on: Sat.Jan.28.19:50:43.KORST.2023.cl_activate_vgs.All_nonerror_volume_groups.epprd_rg.ref
Acquiring resource: All_filesystems                     cl_activate_fs
Search on: Sat.Jan.28.19:50:44.KORST.2023.cl_activate_fs.All_filesystems.epprd_rg.ref
Resource online: All_non_error_filesystems              cl_activate_fs
Search on: Sat.Jan.28.19:50:46.KORST.2023.cl_activate_fs.All_non_error_filesystems.epprd_rg.ref
Acquiring resource: All_exports                         cl_export_fs
Search on: Sat.Jan.28.19:50:52.KORST.2023.cl_export_fs.All_exports.epprd_rg.ref
Resource online: All_nonerror_exports                   cl_export_fs
Search on: Sat.Jan.28.19:50:52.KORST.2023.cl_export_fs.All_nonerror_exports.epprd_rg.ref
Acquiring resource: All_nfs_mounts                      cl_activate_nfs
Search on: Sat.Jan.28.19:50:52.KORST.2023.cl_activate_nfs.All_nfs_mounts.epprd_rg.ref
Acquiring resource: All_servers                         start_server
Search on: Sat.Jan.28.19:51:14.KORST.2023.start_server.All_servers.epprd_rg.ref
Resource online: All_nonerror_servers                   start_server
Search on: Sat.Jan.28.19:51:14.KORST.2023.start_server.All_nonerror_servers.epprd_rg.ref
Resource group online: epprd_rg                         process_resources
Search on: Sat.Jan.28.19:51:14.KORST.2023.process_resources.epprd_rg.ref
----------------------------------------------------------------------------
|EVENT_SUMMARY_START|TE_RG_MOVE_ACQUIRE|2023-01-28T19:50:37|2023-01-28T19:51:15|8602|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:50:38.KORST.2023.process_resources.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:50:38.KORST.2023.acquire_service_addr.All_service_addrs.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:50:38.KORST.2023.acquire_service_addr.All_nonerror_service_addrs.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:50:39.KORST.2023.cl_activate_vgs.All_volume_groups.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:50:43.KORST.2023.cl_activate_vgs.All_nonerror_volume_groups.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:50:44.KORST.2023.cl_activate_fs.All_filesystems.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:50:46.KORST.2023.cl_activate_fs.All_non_error_filesystems.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:50:52.KORST.2023.cl_export_fs.All_exports.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:50:52.KORST.2023.cl_export_fs.All_nonerror_exports.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:50:52.KORST.2023.cl_activate_nfs.All_nfs_mounts.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:51:14.KORST.2023.start_server.All_servers.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:51:14.KORST.2023.start_server.All_nonerror_servers.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Sat.Jan.28.19:51:14.KORST.2023.process_resources.epprd_rg.ref.ref|
|EVENT_SUMMARY_END|

LSNRCTL for IBM/AIX RISC System/6000: Version 11.2.0.3.0 - Production on 28-JAN-2023 19:51:15
Copyright (c) 1991, 2011, Oracle. All rights reserved.
Starting /oracle/EPP/112_64/bin/tnslsnr: please wait...
TNSLSNR for IBM/AIX RISC System/6000: Version 11.2.0.3.0 - Production
System parameter file is /oracle/EPP/112_64/network/admin/listener.ora
Log messages written to /oracle/EPP/saptrace/diag/tnslsnr/epprda/listener/alert/log.xml
Listening on: (DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=EPP.WORLD)))
Listening on: (DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=EPP)))
Listening on: (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=61.81.244.156)(PORT=1521)))
Connecting to (ADDRESS=(PROTOCOL=IPC)(KEY=EPP.WORLD))
STATUS of the LISTENER
------------------------
Alias                     LISTENER
Version                   TNSLSNR for IBM/AIX RISC System/6000: Version 11.2.0.3.0 - Production
Start Date                28-JAN-2023 19:51:15
Uptime                    0 days 0 hr. 0 min. 0 sec
Trace Level               off
Security                  ON: Local OS Authentication
SNMP                      ON
Listener Parameter File   /oracle/EPP/112_64/network/admin/listener.ora
Listener Log File         /oracle/EPP/saptrace/diag/tnslsnr/epprda/listener/alert/log.xml
Listening Endpoints Summary...
  (DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=EPP.WORLD)))
  (DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=EPP)))
  (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=61.81.244.156)(PORT=1521)))
Services Summary...
Service "EPP" has 1 instance(s).
  Instance "EPP", status UNKNOWN, has 1 handler(s) for this service...
The command completed successfully

PowerHA SystemMirror Event Preamble
----------------------------------------------------------------------------
Serial number for this event: 8604
No resource state change initiated by the cluster manager as a result of this event
----------------------------------------------------------------------------
|EVENT_PREAMBLE_START|TE_EXTERNAL_RESOURCE_STATE_CHANGE_COMPLETE|2023-01-28T19:51:17|8604|
|EVENT_NO_ACTIONS_QUEUED|
|EVENT_PREAMBLE_END|

Jan 28 2023 19:51:17 EVENT START: external_resource_state_change_complete epprda
|2023-01-28T19:51:17|8604|EVENT START: external_resource_state_change_complete epprda|
:external_resource_state_change_complete[61] version=%I%
:external_resource_state_change_complete[64] set -u
:external_resource_state_change_complete[66] (( 1 != 1 ))
:external_resource_state_change_complete[73] : serial number for this event is 8604
:external_resource_state_change_complete[76] : This is the final info of all RGs:
:external_resource_state_change_complete[78] clRGinfo -p -t
:external_resource_state_change_complete[78] 2> /dev/null
Cluster Name: epprda_cluster

Resource Group Name: epprd_rg
Node                                                             Group State     Delayed Timers
---------------------------------------------------------------- --------------- -------------------
epprda                                                           ONLINE
epprds                                                           OFFLINE

:external_resource_state_change_complete[80] exit 0
Jan 28 2023 19:51:17 EVENT COMPLETED: external_resource_state_change_complete epprda 0
|2023-01-28T19:51:17|8604|EVENT COMPLETED: external_resource_state_change_complete epprda 0|
Checking EPP Database
-------------------------------------------
J2EE Database is not available via test
See logfile /home/eppadm/JdbcCon.log
Running /usr/sap/EPP/SYS/exe/run/startj2eedb
Trying to start EPP database ...
Log file: /home/eppadm/startdb.log
EPP database started
/usr/sap/EPP/SYS/exe/run/startj2eedb completed successfully
Starting Startup Agent sapstartsrv
OK
Instance Service on host epprda started
-------------------------------------------
starting SAP Instance SCS01
Startup-Log is written to /home/eppadm/startsap_SCS01.log
-------------------------------------------
/usr/sap/EPP/SCS01/exe/sapcontrol -prot NI_HTTP -nr 01 -function Start
Instance on host epprda started
Starting Startup Agent sapstartsrv
OK
Instance Service on host epprda started
-------------------------------------------
starting SAP Instance J00
Startup-Log is written to /home/eppadm/startsap_J00.log
-------------------------------------------
/usr/sap/EPP/J00/exe/sapcontrol -prot NI_HTTP -nr 00 -function Start
Instance on host epprda started
Starting Startup Agent sapstartsrv
OK
Instance Service on host epprda started
-------------------------------------------
starting SAP Instance SMDA97
Startup-Log is written to /home/daaadm/startsap_SMDA97.log
-------------------------------------------
/usr/sap/DAA/SMDA97/exe/sapcontrol -prot NI_HTTP -nr 97 -function Start
Instance on host epprda started
[1] 19661190
/sapmnt/EPP/exe/uc/rs6000_64/log/SLOG10: No such file or directory
rslgwr1(11): rstrbopen cannot open SysLog file.
SysLog:lIM120230128195247001966100000IC : 0 :SAP Web Dispatcher&epprda.sebang.com&19661190& icxxrout2014 icmbnd: handle for "epprda:80" (on all adapters) successfully sent to server *** SAP Web Dispatcher up and operational (pid: 19661190, HTTP: 80, HTTPS: -) *** Sep 28 2023 15:55:38 EVENT START: admin_op clrm_stop_request 8607 0 |2023-09-28T15:55:38|8607|EVENT START: admin_op clrm_stop_request 8607 0| :admin_op[110] trap sigint_handler INT :admin_op[116] OP_TYPE=clrm_stop_request :admin_op[116] typeset OP_TYPE :admin_op[117] SERIAL=8607 :admin_op[117] typeset -li SERIAL :admin_op[118] INVALID=0 :admin_op[118] typeset -li INVALID The administrator initiated the following action at Thu Sep 28 15:55:38 KORST 2023 Check smit.log and clutils.log for additional details. Stopping PowerHA cluster services on node: epprda in graceful mode... Sep 28 2023 15:55:38 EVENT COMPLETED: admin_op clrm_stop_request 8607 0 0 |2023-09-28T15:55:38|8607|EVENT COMPLETED: admin_op clrm_stop_request 8607 0 0| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 8607 Stop cluster services request with 'Graceful' option received for 'epprda'. Enqueued rg_move release event for resource group epprd_rg. Node Down Completion Event has been enqueued. ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_FAIL_NODE_DEP|2023-09-28T15:55:38|8607| |STOP_CLUSTER_SERVICES|Graceful|epprda| |CLUSTER_RG_MOVE_RELEASE|epprd_rg| |NODE_DOWN_COMPLETE| |EVENT_PREAMBLE_END| Sep 28 2023 15:55:39 EVENT START: node_down epprda graceful |2023-09-28T15:55:39|8607|EVENT START: node_down epprda graceful| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-28T15:55:39.345175 + echo '|2023-09-28T15:55:39.345175|INFO: node_down|epprda|graceful' + 1>> /var/hacmp/availability/clavailability.log :node_down[64] version=%I% :node_down[67] NODENAME=epprda :node_down[67] export NODENAME :node_down[68] PARAM=graceful :node_down[68] export PARAM :node_down[75] STATUS=0 :node_down[75] typeset -li STATUS :node_down[77] AIX_SHUTDOWN=false :node_down[79] set -u :node_down[81] (( 2 < 1 )) :node_down[87] : serial number for this event is 8607 :node_down[91] : Clean up NFS state tracking :node_down[93] UPDATESTATDFILE=/usr/es/sbin/cluster/etc/updatestatd :node_down[94] rm -f /tmp/.RPCLOCKDSTOPPED :node_down[95] rm -f /usr/es/sbin/cluster/etc/updatestatd :node_down[96] UPDATESTATD=0 :node_down[97] export UPDATESTATD :node_down[100] : For RAS debugging, the result of ps -edf is captured at this time :node_down[102] : begin ps -edf :node_down[103] ps -edf UID PID PPID C STIME TTY TIME CMD root 1 0 0 Nov 16 - 1:25 /etc/init root 4260170 6095340 0 Nov 16 - 0:43 /usr/sbin/syslogd root 5046714 1 0 Nov 16 - 0:00 /usr/ccs/bin/shlap64 root 5177846 1 0 Nov 16 - 106:37 /usr/sbin/syncd 60 root 5898680 1 0 Nov 16 - 0:00 /usr/dt/bin/dtlogin -daemon root 5964246 1 0 Nov 16 - 0:00 /usr/lib/errdemon root 6029768 6095340 0 Jan 28 - 0:45 /usr/sbin/snmpd root 6095340 1 0 Nov 16 - 0:01 /usr/sbin/srcmstr root 6226176 6095340 0 Nov 16 - 0:00 /usr/sbin/inetd root 6357492 6095340 0 Nov 16 - 0:00 /usr/sbin/portmap root 6488536 6095340 0 Nov 16 - 4:10 /usr/sbin/xntpd -x root 6816230 6095340 0 Nov 16 - 0:30 /usr/sbin/hostmibd root 6881760 6095340 0 Nov 16 - 1:39 sendmail: accepting connections root 6947294 6095340 0 Nov 16 - 0:30 /usr/sbin/snmpmibd root 7143710 6095340 0 Nov 16 - 2:00 /usr/sbin/aixmibd root 7668214 1 0 Nov 16 - 3:17 
/usr/sbin/cron root 7799282 6095340 0 Nov 16 - 5:13 /usr/sbin/aso daemon 7864678 6095340 0 Jan 28 - 0:00 /usr/sbin/rpc.statd -d 0 -t 50 root 7930136 6095340 0 Nov 16 - 0:01 /usr/sbin/qdaemon root 8061186 6095340 0 Nov 16 - 0:00 /usr/sbin/biod 6 root 8126748 1 0 Nov 16 - 0:00 /usr/sbin/uprintfd root 8520102 6095340 0 Nov 16 - 0:00 /usr/sbin/writesrv root 8585542 6095340 0 Nov 16 - 0:00 /usr/sbin/sshd root 8913186 6095340 0 Nov 16 - 0:00 /usr/sbin/pfcdaemon root 13959478 6095340 0 Jan 28 - 5:16 /opt/rsct/bin/rmcd -a IBM.LPCommands -r -S 1500 root 14025136 6095340 0 Nov 16 - 0:00 /usr/sbin/lldpd root 14090674 6095340 0 Nov 16 - 0:00 /usr/sbin/ecpvdpd root 14287294 1 0 Nov 16 - 6:55 /usr/bin/topasrec -L -s 300 -R 1 -r 6 -o /var/perf/daily/ -ypersistent=1 -O type=bin -ystart_time=15:11:38,Nov16,2022 root 14352890 6095340 0 Nov 16 - 0:23 /opt/rsct/bin/IBM.MgmtDomainRMd root 14614934 8585542 0 15:51:02 - 0:00 sshd: root@pts/1 root 14877148 6095340 0 Nov 16 - 0:01 /var/perf/pm/bin/pmperfrec root 15008234 6095340 0 Nov 16 - 1:25 /opt/rsct/bin/IBM.HostRMd root 15073556 6095340 0 Nov 16 - 0:06 /opt/rsct/bin/IBM.ServiceRMd root 15532528 6095340 0 Nov 16 - 0:06 /opt/rsct/bin/IBM.DRMd eppadm 19661190 1 0 Jan 28 - 29:16 /sapmnt/EPP/exe/uc/rs6000_64/sapwebdisp pf=/usr/sap/sapwebdisp/sapwebdisp.pfl root 19988980 1 0 Feb 01 - 77:22 /home/NGFAgent/bin/INWatchDog.exe root 20316440 25690554 0 15:55:38 vty0 0:00 sed -u s/^/epprds: / root 20709822 26804702 0 15:55:38 vty0 0:00 tee -a /tmp/cel37028262_s2.err root 20775334 49152500 0 15:55:39 - 0:00 /bin/ksh93 /usr/es/sbin/cluster/events/node_down epprda graceful root 20972018 6095340 0 Jan 28 - 28:54 /opt/rsct/bin/IBM.ConfigRMd eppadm 21823824 1 0 Jan 28 - 7:37 /usr/sap/EPP/SCS01/exe/sapstartsrv pf=/usr/sap/EPP/SYS/profile/EPP_SCS01_epprd -D root 22085916 1 0 Feb 27 - 0:16 /opt/VRTSpbx/bin/pbx_exchange root 22610296 6095340 0 Jan 28 - 21:20 /opt/rsct/bin/IBM.StorageRMd root 22937978 32702838 0 Feb 08 - 183:39 /opt/vada/agent/collector_aix -c /opt/vada/agent root 23527792 25690554 0 15:55:38 vty0 0:00 cat root 23593352 26804702 0 15:55:38 vty0 0:00 sed -u s/^/epprds: / root 24248652 1 0 Jan 28 - 0:58 /usr/sap/hostctrl/exe/saphostexec pf=/usr/sap/hostctrl/exe/host_profile root 24314162 6095340 0 Jan 28 - 0:00 /usr/sbin/rpc.lockd -d 0 root 24445324 22937978 0 15:55:33 - 0:00 awk {print $5} root 24576344 25297270 0 0:00 daaadm 24772992 1 0 Jan 28 - 0:42 /usr/sap/DAA/SMDA97/exe/sapstartsrv pf=/usr/sap/DAA/SYS/profile/DAA_SMDA97_epprd -D sapadm 24969578 1 0 Jan 28 - 0:07 /usr/sap/hostctrl/exe/sapstartsrv pf=/usr/sap/hostctrl/exe/host_profile -D root 25297270 29098468 0 0:00 root 25625036 26804702 0 15:55:38 vty0 0:00 grep -uv : RETURN_CODE= root 25690554 26804702 0 15:55:38 vty0 0:00 tee -a /tmp/cel37028262_s2.out root 26018190 25690554 0 15:55:38 vty0 0:00 /bin/ksh93 /usr/es/sbin/cluster/cspoc/cdsh /tmp/cel37028262_s2 epprds clstop -N -g eppadm 26083754 1 0 Jan 28 - 19:54 /usr/sap/EPP/J00/exe/sapstartsrv pf=/usr/sap/EPP/SYS/profile/EPP_J00_epprd -D root 26345828 24445324 0 15:55:33 - 0:00 tail -1 root 26607988 1 0 Feb 01 - 58:55 /home/NGFAgent/bin/NGFAgent.exe root 26673452 6095340 0 Jan 28 - 8:02 /usr/es/sbin/cluster/clstrmgr root 26804702 37028262 0 15:55:38 vty0 0:00 /bin/ksh93 /usr/es/sbin/cluster/cspoc/cdsh /tmp/cel37028262_s2 epprds clstop -N -g root 27001308 27984224 0 15:55:38 vty0 0:00 /usr/sbin/clrsh epprds -n /usr/es/sbin/cluster/cspoc/cexec clstop -N -g root 27328812 27394326 0 15:55:36 vty0 0:00 /bin/ksh /usr/es/sbin/cluster/cspoc/fix_args nop cl_clstop 
-N -cspoc-n epprda,epprds -g root 27394326 50266420 0 15:55:32 vty0 0:00 smitty clstop root 27787662 28246396 0 Jan 28 - 0:15 /usr/sbin/gsclvmd -r 30 -i 300 -t 300 -c 00c44af100004b00000001851e9dc053 -v 0 root 27984224 26018190 0 15:55:38 vty0 0:00 /bin/ksh93 /usr/es/sbin/cluster/utilities/cl_rsh -n epprds /usr/es/sbin/cluster/cspoc/cexec clstop -N -g root 28180804 13959478 0 Jan 28 - 0:05 [trspoolm] root 28246396 6095340 0 Jan 28 - 0:10 /usr/sbin/gsclvmd root 28311944 20775334 0 15:55:39 - 0:00 ps -edf root 28377402 6095340 0 Jan 28 - 0:00 /usr/sbin/nfsd 3891 root 28574156 26673452 0 Jan 28 - 0:00 run_rcovcmd root 28770708 6095340 0 Jan 28 - 0:56 /usr/sbin/clconfd root 28836212 26804702 0 15:55:38 vty0 0:00 cat root 28901690 6095340 0 Jan 28 - 0:00 /usr/sbin/rpc.mountd root 29098468 6095340 0 Jan 28 - 11:58 /usr/sbin/clcomd -d -g root 29163932 6095340 0 Jan 28 - 21:16 /usr/sbin/rsct/bin/hagsd cthags root 30867832 1 0 Feb 27 - 3:58 /usr/openv/netbackup/bin/nbdisco root 31261082 1 0 Feb 27 - 6:44 /usr/openv/netbackup/bin/vnetd -proxy outbound_proxy -number 0 root 31785246 1 0 Feb 27 - 7:52 /usr/openv/netbackup/bin/bpcd -standalone root 32178532 1 0 Feb 27 - 9:58 /usr/openv/netbackup/bin/nbftclnt root 32702838 1 0 Feb 08 - 0:00 /opt/vada/agent/collector_aix -c /opt/vada/agent root 32768294 1 0 Feb 27 - 2:46 /usr/openv/netbackup/bin/vnetd -proxy inbound_proxy -number 0 root 33292612 1 0 Feb 27 - 3:49 /usr/openv/netbackup/bin/vnetd -standalone root 37028262 27328812 0 15:55:36 vty0 0:00 /bin/ksh /usr/es/sbin/cluster/sbin/cl_clstop -cspoc -n epprda,epprds -N -g root 38797762 42991978 0 15:53:31 pts/2 0:00 -ksh root 40108500 49217870 0 15:55:33 - 0:00 /usr/lib/sa/sadc -x abcdkmqrvwy 10 2 root 40763688 45547860 0 13:03:00 pts/3 0:00 -ksh root 42271120 14614934 0 15:51:05 pts/1 0:00 -ksh root 42991978 8585542 0 15:53:29 - 0:00 sshd: root@pts/2 root 44761380 1 0 00:00:00 - 0:00 /usr/bin/topas_nmon -f -d -t -s 300 -c 288 -youtput_dir=/ptf/nmon/epprda -ystart_time=00:00:00,Sep28,2023 root 45547860 8585542 0 13:02:51 - 0:00 sshd: root@pts/3 root 49152500 28574156 4 15:55:39 - 0:00 /usr/es/sbin/cluster/events/cmd/clcallev node_down epprda graceful root 49217870 24445324 0 15:55:33 - 0:00 sar 10 1 root 50266420 1 0 15:54:33 vty0 0:00 -ksh :node_down[104] : end ps -edf :node_down[107] : If RG_DEPENDENCIES is not false, all RG actions are taken via rg_move events. :node_down[109] [[ graceful != forced ]] :node_down[109] [[ TRUE == FALSE ]] :node_down[207] : Processing specific to the local node :node_down[209] [[ epprda == epprda ]] :node_down[212] : Stopping cluster services on epprda with the graceful option :node_down[214] [[ graceful != forced ]] :node_down[219] lsvg -L :node_down[219] lsvg -L -o :node_down[219] paste -s '-d|' - :node_down[219] grep -w -v -x -E 'datavg|caavg_private|rootvg' :node_down[219] INACTIVE_VGS=altinst_rootvg :node_down[222] [[ -n altinst_rootvg ]] :node_down[224] : Found inactive VGs. 
For those that are online in passive :node_down[225] : mode, remove any read only fencing, then varyoff :node_down:altinst_rootvg[229] PS4_LOOP=altinst_rootvg :node_down:altinst_rootvg[230] lsvg -L altinst_rootvg :node_down:altinst_rootvg[230] 2> /dev/null :node_down:altinst_rootvg[230] grep -i -q passive-only :node_down:altinst_rootvg[272] unset PS4_LOOP :node_down[276] : update the location DB to indicate this node is going down :node_down[278] clchdaemons -r -d clstrmgr_scripts -t resource_locator :node_down[296] [[ -n false ]] :node_down[296] [[ false == true ]] :node_down[305] exit 0 Sep 28 2023 15:55:39 EVENT COMPLETED: node_down epprda graceful 0 |2023-09-28T15:55:39|8607|EVENT COMPLETED: node_down epprda graceful 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-28T15:55:39.505441 + echo '|2023-09-28T15:55:39.505441|INFO: node_down|epprda|graceful|0' + 1>> /var/hacmp/availability/clavailability.log PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 8606 Stop cluster services request with 'Graceful' option received for 'epprds'. Enqueued rg_move release event for resource group epprd_rg. Node Down Completion Event has been enqueued. ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_FAIL_NODE_DEP|2023-09-28T15:55:42|8606| |STOP_CLUSTER_SERVICES|Graceful|epprds| |CLUSTER_RG_MOVE_RELEASE|epprd_rg| |NODE_DOWN_COMPLETE| |EVENT_PREAMBLE_END| Sep 28 2023 15:55:43 EVENT START: node_down epprds graceful |2023-09-28T15:55:43|8606|EVENT START: node_down epprds graceful| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-28T15:55:43.494196 + echo '|2023-09-28T15:55:43.494196|INFO: node_down|epprds|graceful' + 1>> /var/hacmp/availability/clavailability.log :node_down[64] version=%I% :node_down[67] NODENAME=epprds :node_down[67] export NODENAME :node_down[68] PARAM=graceful :node_down[68] export PARAM :node_down[75] STATUS=0 :node_down[75] typeset -li STATUS :node_down[77] AIX_SHUTDOWN=false :node_down[79] set -u :node_down[81] (( 2 < 1 )) :node_down[87] : serial number for this event is 8606 :node_down[91] : Clean up NFS state tracking :node_down[93] UPDATESTATDFILE=/usr/es/sbin/cluster/etc/updatestatd :node_down[94] rm -f /tmp/.RPCLOCKDSTOPPED :node_down[95] rm -f /usr/es/sbin/cluster/etc/updatestatd :node_down[96] UPDATESTATD=0 :node_down[97] export UPDATESTATD :node_down[100] : For RAS debugging, the result of ps -edf is captured at this time :node_down[102] : begin ps -edf :node_down[103] ps -edf UID PID PPID C STIME TTY TIME CMD root 1 0 0 Nov 16 - 1:25 /etc/init root 4260170 6095340 0 Nov 16 - 0:43 /usr/sbin/syslogd root 5046714 1 0 Nov 16 - 0:00 /usr/ccs/bin/shlap64 root 5177846 1 0 Nov 16 - 106:37 /usr/sbin/syncd 60 root 5898680 1 0 Nov 16 - 0:00 /usr/dt/bin/dtlogin -daemon root 5964246 1 0 Nov 16 - 0:00 /usr/lib/errdemon root 6029768 6095340 0 Jan 28 - 0:45 /usr/sbin/snmpd root 6095340 1 0 Nov 16 - 0:01 /usr/sbin/srcmstr root 6226176 6095340 0 Nov 16 - 0:00 /usr/sbin/inetd root 6357492 6095340 0 Nov 16 - 0:00 /usr/sbin/portmap root 6488536 6095340 0 Nov 16 - 4:10 /usr/sbin/xntpd -x root 6816230 6095340 0 Nov 16 - 0:30 /usr/sbin/hostmibd root 6881760 6095340 0 Nov 16 - 1:39 sendmail: accepting connections root 6947294 6095340 0 Nov 16 - 0:30 /usr/sbin/snmpmibd root 7143710 6095340 0 Nov 16 - 2:00 /usr/sbin/aixmibd root 7668214 1 0 Nov 16 - 3:17 /usr/sbin/cron root 7799282 6095340 0 Nov 16 - 5:13 
/usr/sbin/aso daemon 7864678 6095340 0 Jan 28 - 0:00 /usr/sbin/rpc.statd -d 0 -t 50 root 7930136 6095340 0 Nov 16 - 0:01 /usr/sbin/qdaemon root 8061186 6095340 0 Nov 16 - 0:00 /usr/sbin/biod 6 root 8126748 1 0 Nov 16 - 0:00 /usr/sbin/uprintfd root 8520102 6095340 0 Nov 16 - 0:00 /usr/sbin/writesrv root 8585542 6095340 0 Nov 16 - 0:00 /usr/sbin/sshd root 8913186 6095340 0 Nov 16 - 0:00 /usr/sbin/pfcdaemon root 13959478 6095340 0 Jan 28 - 5:16 /opt/rsct/bin/rmcd -a IBM.LPCommands -r -S 1500 root 14025136 6095340 0 Nov 16 - 0:00 /usr/sbin/lldpd root 14090674 6095340 0 Nov 16 - 0:00 /usr/sbin/ecpvdpd root 14287294 1 0 Nov 16 - 6:55 /usr/bin/topasrec -L -s 300 -R 1 -r 6 -o /var/perf/daily/ -ypersistent=1 -O type=bin -ystart_time=15:11:38,Nov16,2022 root 14352890 6095340 0 Nov 16 - 0:23 /opt/rsct/bin/IBM.MgmtDomainRMd root 14614934 8585542 0 15:51:02 - 0:00 sshd: root@pts/1 root 14877148 6095340 0 Nov 16 - 0:01 /var/perf/pm/bin/pmperfrec root 15008234 6095340 0 Nov 16 - 1:25 /opt/rsct/bin/IBM.HostRMd root 15073556 6095340 0 Nov 16 - 0:06 /opt/rsct/bin/IBM.ServiceRMd root 15532528 6095340 0 Nov 16 - 0:06 /opt/rsct/bin/IBM.DRMd eppadm 19661190 1 0 Jan 28 - 29:16 /sapmnt/EPP/exe/uc/rs6000_64/sapwebdisp pf=/usr/sap/sapwebdisp/sapwebdisp.pfl root 19988980 1 0 Feb 01 - 77:22 /home/NGFAgent/bin/INWatchDog.exe root 20972018 6095340 0 Jan 28 - 28:54 /opt/rsct/bin/IBM.ConfigRMd eppadm 21823824 1 0 Jan 28 - 7:37 /usr/sap/EPP/SCS01/exe/sapstartsrv pf=/usr/sap/EPP/SYS/profile/EPP_SCS01_epprd -D root 22085916 1 0 Feb 27 - 0:16 /opt/VRTSpbx/bin/pbx_exchange root 22610296 6095340 0 Jan 28 - 21:20 /opt/rsct/bin/IBM.StorageRMd root 22937978 32702838 0 Feb 08 - 183:39 /opt/vada/agent/collector_aix -c /opt/vada/agent root 24248652 1 0 Jan 28 - 0:58 /usr/sap/hostctrl/exe/saphostexec pf=/usr/sap/hostctrl/exe/host_profile root 24314162 6095340 0 Jan 28 - 0:00 /usr/sbin/rpc.lockd -d 0 root 24445324 22937978 0 15:55:33 - 0:00 awk {print $5} root 24576346 25297276 0 0:00 daaadm 24772992 1 0 Jan 28 - 0:42 /usr/sap/DAA/SMDA97/exe/sapstartsrv pf=/usr/sap/DAA/SYS/profile/DAA_SMDA97_epprd -D sapadm 24969578 1 0 Jan 28 - 0:07 /usr/sap/hostctrl/exe/sapstartsrv pf=/usr/sap/hostctrl/exe/host_profile -D root 25297276 29098468 0 0:00 root 25690584 27328818 0 15:55:43 - 0:00 /bin/ksh93 /usr/es/sbin/cluster/events/node_down epprds graceful eppadm 26083754 1 0 Jan 28 - 19:54 /usr/sap/EPP/J00/exe/sapstartsrv pf=/usr/sap/EPP/SYS/profile/EPP_J00_epprd -D root 26345828 24445324 0 15:55:33 - 0:00 tail -1 root 26607988 1 0 Feb 01 - 58:55 /home/NGFAgent/bin/NGFAgent.exe root 26673452 6095340 0 Jan 28 - 8:02 /usr/es/sbin/cluster/clstrmgr root 27001312 29098468 0 0:00 root 27328818 28574156 4 15:55:43 - 0:00 /usr/es/sbin/cluster/events/cmd/clcallev node_down epprds graceful root 27394326 50266420 0 15:55:32 vty0 0:00 smitty clstop root 27787662 28246396 0 Jan 28 - 0:15 /usr/sbin/gsclvmd -r 30 -i 300 -t 300 -c 00c44af100004b00000001851e9dc053 -v 0 root 28180804 13959478 0 Jan 28 - 0:05 [trspoolm] root 28246396 6095340 0 Jan 28 - 0:10 /usr/sbin/gsclvmd root 28377402 6095340 0 Jan 28 - 0:00 /usr/sbin/nfsd 3891 root 28574156 26673452 0 Jan 28 - 0:00 run_rcovcmd root 28770708 6095340 0 Jan 28 - 0:56 /usr/sbin/clconfd root 28901690 6095340 0 Jan 28 - 0:00 /usr/sbin/rpc.mountd root 29098468 6095340 0 Jan 28 - 11:58 /usr/sbin/clcomd -d -g root 29163932 6095340 0 Jan 28 - 21:16 /usr/sbin/rsct/bin/hagsd cthags root 30867832 1 0 Feb 27 - 3:58 /usr/openv/netbackup/bin/nbdisco root 31261082 1 0 Feb 27 - 6:44 /usr/openv/netbackup/bin/vnetd -proxy 
outbound_proxy -number 0 root 31785246 1 0 Feb 27 - 7:52 /usr/openv/netbackup/bin/bpcd -standalone root 32178532 1 0 Feb 27 - 9:58 /usr/openv/netbackup/bin/nbftclnt root 32702838 1 0 Feb 08 - 0:00 /opt/vada/agent/collector_aix -c /opt/vada/agent root 32768294 1 0 Feb 27 - 2:46 /usr/openv/netbackup/bin/vnetd -proxy inbound_proxy -number 0 root 33292612 1 0 Feb 27 - 3:49 /usr/openv/netbackup/bin/vnetd -standalone root 37028206 25690584 0 15:55:43 - 0:00 ps -edf root 38797762 42991978 0 15:53:31 pts/2 0:00 -ksh root 40108500 49217870 0 15:55:33 - 0:00 /usr/lib/sa/sadc -x abcdkmqrvwy 10 2 root 40763688 45547860 0 13:03:00 pts/3 0:00 -ksh root 42271120 14614934 0 15:51:05 pts/1 0:00 -ksh root 42991978 8585542 0 15:53:29 - 0:00 sshd: root@pts/2 root 44761380 1 0 00:00:00 - 0:00 /usr/bin/topas_nmon -f -d -t -s 300 -c 288 -youtput_dir=/ptf/nmon/epprda -ystart_time=00:00:00,Sep28,2023 root 45547860 8585542 0 13:02:51 - 0:00 sshd: root@pts/3 root 49217870 24445324 0 15:55:33 - 0:00 sar 10 1 root 50266420 1 0 15:54:33 vty0 0:00 -ksh :node_down[104] : end ps -edf :node_down[107] : If RG_DEPENDENCIES is not false, all RG actions are taken via rg_move events. :node_down[109] [[ graceful != forced ]] :node_down[109] [[ TRUE == FALSE ]] :node_down[207] : Processing specific to the local node :node_down[209] [[ epprds == epprda ]] :node_down[284] : epprds, is not the local node, handle fencing for any VGs marked as $'\'CRITICAL\'.' :node_down[286] cl_fence_vg epprds :cl_fence_vg[336] version=%I% :cl_fence_vg[341] : Collect list of disks, for use later :cl_fence_vg[343] lspv :cl_fence_vg[343] lspv_out=$'hdisk0 00c44af155592938 rootvg active \nhdisk1 00c44af11e9e1645 caavg_private active \nhdisk2 00c44af11e8a9c69 datavg concurrent \nhdisk3 00c44af11e8a9cd7 datavg concurrent \nhdisk4 00c44af11e8a9d3c datavg concurrent \nhdisk5 00c44af11e8a9c05 datavg concurrent \nhdisk6 00c44af11e8a9e05 datavg concurrent \nhdisk7 00c44af11e8a9d9f datavg concurrent \nhdisk8 00c44af11e8a9e69 datavg concurrent \nhdisk9 00c4e031763311cc None \nhdisk10 00c44af1822dc243 altinst_rootvg ' :cl_fence_vg[345] [[ -z epprda ]] :cl_fence_vg[354] : Accept a formal parameter of 'name of node that failed' if none were set :cl_fence_vg[355] : in the environment :cl_fence_vg[357] EVENTNODE=epprds :cl_fence_vg[359] [[ -z epprds ]] :cl_fence_vg[368] : An explicit volume group list can be passed after the name of :cl_fence_vg[369] : the node that failed. 
Pick up any such :cl_fence_vg[371] shift :cl_fence_vg[372] vg_list='' :cl_fence_vg[374] common_groups='' :cl_fence_vg[375] common_critical_vgs='' :cl_fence_vg[377] [[ -z '' ]] :cl_fence_vg[380] : Find all the concurrent resource groups that contain both epprds and epprda :cl_fence_vg[382] clodmget -q 'startup_pref = OAAN' -f group -n HACMPgroup :cl_fence_vg[424] : Look at each of the resource groups in turn to determine what CRITICAL :cl_fence_vg[425] : volume groups the local node epprda share access with epprds :cl_fence_vg[443] : Process the list of common volume groups, :node_down[296] [[ -n false ]] :node_down[296] [[ false == true ]] :node_down[305] exit 0 Sep 28 2023 15:55:43 EVENT COMPLETED: node_down epprds graceful 0 |2023-09-28T15:55:43|8606|EVENT COMPLETED: node_down epprds graceful 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-28T15:55:43.648663 + echo '|2023-09-28T15:55:43.648663|INFO: node_down|epprds|graceful|0' + 1>> /var/hacmp/availability/clavailability.log Sep 28 2023 15:55:45 EVENT START: rg_move_release epprda 1 |2023-09-28T15:55:45|8608|EVENT START: rg_move_release epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-28T15:55:45.887890 + echo '|2023-09-28T15:55:45.887890|INFO: rg_move_release|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_release[+54] [[ high = high ]] :rg_move_release[+54] version=1.6 :rg_move_release[+56] set -u :rg_move_release[+58] [ 2 != 2 ] :rg_move_release[+64] set +u :rg_move_release[+66] clcallev rg_move epprda 1 RELEASE Sep 28 2023 15:55:45 EVENT START: rg_move epprda 1 RELEASE |2023-09-28T15:55:45|8608|EVENT START: rg_move epprda 1 RELEASE| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-09-28T15:55:46.018841 :clevlog[amlog_trace:320] echo '|2023-09-28T15:55:46.018841|INFO: rg_move|epprd_rg|epprda|1|RELEASE' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. :get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move[76] version=%I% :rg_move[86] STATUS=0 :rg_move[88] [[ ! 
-n '' ]] :rg_move[90] EMULATE=REAL :rg_move[96] set -u :rg_move[98] NODENAME=epprda :rg_move[98] export NODENAME :rg_move[99] RGID=1 :rg_move[100] (( 3 == 3 )) :rg_move[102] ACTION=RELEASE :rg_move[108] : serial number for this event is 8608 :rg_move[112] RG_UP_POSTEVENT_ON_NODE=epprda :rg_move[112] export RG_UP_POSTEVENT_ON_NODE :rg_move[116] clodmget -qid=1 -f group -n HACMPgroup :rg_move[116] eval RGNAME=epprd_rg :rg_move[1] RGNAME=epprd_rg :rg_move[118] UPDATESTATD=0 :rg_move[119] export UPDATESTATD :rg_move[123] RG_MOVE_EVENT=true :rg_move[123] export RG_MOVE_EVENT :rg_move[128] group_state='$RESGRP_epprd_rg_epprda' :rg_move[129] set +u :rg_move[130] eval print '$RESGRP_epprd_rg_epprda' :rg_move[1] print ONLINE :rg_move[130] RG_MOVE_ONLINE=ONLINE :rg_move[130] export RG_MOVE_ONLINE :rg_move[131] set -u :rg_move[132] RG_MOVE_ONLINE=ONLINE :rg_move[139] rm -f /tmp/.NFSSTOPPED :rg_move[140] rm -f /tmp/.RPCLOCKDSTOPPED :rg_move[147] set -a :rg_move[148] clsetenvgrp epprda rg_move epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 :rg_move[148] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[149] RC=0 :rg_move[150] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[1] FORCEDOWN_GROUPS='' :rg_move[2] RESOURCE_GROUPS='' :rg_move[3] HOMELESS_GROUPS='' :rg_move[4] HOMELESS_FOLLOWER_GROUPS='' :rg_move[5] ERRSTATE_GROUPS='' :rg_move[6] PRINCIPAL_ACTIONS='' :rg_move[7] ASSOCIATE_ACTIONS='' :rg_move[8] AUXILLIARY_ACTIONS='' :rg_move[8] SIBLING_GROUPS='' :rg_move[9] SIBLING_NODES_BY_GROUP='' :rg_move[10] SIBLING_ACQUIRING_GROUPS='' :rg_move[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' :rg_move[12] SIBLING_RELEASING_GROUPS='' :rg_move[13] SIBLING_RELEASING_NODES_BY_GROUP='' :rg_move[151] set +a :rg_move[155] (( 0 != 0 )) :rg_move[155] [[ -z epprd_rg ]] :rg_move[164] [[ -z TRUE ]] :rg_move[241] AM_SYNC_CALLED_BY=RG_MOVE :rg_move[241] export AM_SYNC_CALLED_BY :rg_move[242] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-09-28T15:55:46.146237 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=RELEASE RESOURCE_GROUPS='"epprd_rg"' PRINCIPAL_ACTION='"RELEASE"' AUXILLIARY_ACTION='"NONE"' :process_resources[1] JOB_TYPE=RELEASE 
:process_resources[1] RESOURCE_GROUPS=epprd_rg :process_resources[1] PRINCIPAL_ACTION=RELEASE :process_resources[1] AUXILLIARY_ACTION=NONE :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 )) :process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3363] INFO_STRING='' +epprd_rg:process_resources[3364] clnodename +epprd_rg:process_resources[3373] ENV_VAR=GROUP_epprd_rg_epprda +epprd_rg:process_resources[3374] eval 'echo $GROUP_epprd_rg_epprda' +epprd_rg:process_resources[1] echo ISUPPREEVENT +epprd_rg:process_resources[3374] read ENV_VAR +epprd_rg:process_resources[3375] [[ ISUPPREEVENT == WILLBEUPPOSTEVENT ]] +epprd_rg:process_resources[3379] [[ ISUPPREEVENT == ISUPPREEVENT ]] +epprd_rg:process_resources[3380] INFO_STRING='|SOURCE=epprda' +epprd_rg:process_resources[3381] IS_SERVICE_START=0 +epprd_rg:process_resources[3373] ENV_VAR=GROUP_epprd_rg_epprds +epprd_rg:process_resources[3374] eval 'echo $GROUP_epprd_rg_epprds' +epprd_rg:process_resources[1] echo +epprd_rg:process_resources[3374] read ENV_VAR +epprd_rg:process_resources[3375] [[ '' == WILLBEUPPOSTEVENT ]] +epprd_rg:process_resources[3379] [[ '' == ISUPPREEVENT ]] +epprd_rg:process_resources[3384] (( 0 == 0 && 1 ==0 )) +epprd_rg:process_resources[3660] set_resource_group_state RELEASING +epprd_rg:process_resources[set_resource_group_state:82] PS4_FUNC=set_resource_group_state +epprd_rg:process_resources[set_resource_group_state:82] typeset PS4_FUNC +epprd_rg:process_resources[set_resource_group_state:83] [[ high == high ]] +epprd_rg:process_resources[set_resource_group_state:83] set -x +epprd_rg:process_resources[set_resource_group_state:84] STAT=0 +epprd_rg:process_resources[set_resource_group_state:85] new_status=RELEASING +epprd_rg:process_resources[set_resource_group_state:89] export GROUPNAME +epprd_rg:process_resources[set_resource_group_state:90] [[ RELEASING != DOWN ]] +epprd_rg:process_resources[set_resource_group_state:92] clchdaemons -d clstrmgr_scripts -t resource_locator -n epprda -o epprd_rg -v RELEASING +epprd_rg:process_resources[set_resource_group_state:100] : Resource Manager Updates +epprd_rg:process_resources[set_resource_group_state:111] amlog_trace '' 'acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:318] clcycle clavailability.log +epprd_rg:process_resources[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:process_resources[amlog_trace:319] cltime +epprd_rg:process_resources[amlog_trace:319] DATE=2023-09-28T15:55:46.194199 +epprd_rg:process_resources[amlog_trace:320] echo '|2023-09-28T15:55:46.194199|INFO: acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:process_resources[set_resource_group_state:112] cl_RMupdate releasing epprd_rg process_resources 2023-09-28T15:55:46.218909 2023-09-28T15:55:46.223580 +epprd_rg:process_resources[set_resource_group_state:153] return 0 +epprd_rg:process_resources[3661] RC=0 +epprd_rg:process_resources[3662] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high 
]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-09-28T15:55:46.236000 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=APPLICATIONS ACTION=RELEASE ALL_APPLICATIONS='"epprd_app"' RESOURCE_GROUPS='"epprd_rg' '"' MISCDATA='""' +epprd_rg:process_resources[1] JOB_TYPE=APPLICATIONS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] ALL_APPLICATIONS=epprd_app +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] MISCDATA='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ APPLICATIONS == RELEASE ]] +epprd_rg:process_resources[3360] [[ APPLICATIONS == ONLINE ]] +epprd_rg:process_resources[3549] process_applications RELEASE +epprd_rg:process_resources[process_applications:312] PS4_FUNC=process_applications +epprd_rg:process_resources[process_applications:312] typeset PS4_FUNC +epprd_rg:process_resources[process_applications:313] [[ high == high ]] +epprd_rg:process_resources[process_applications:313] set -x +epprd_rg:process_resources[process_applications:316] : Each subprocess will log to a file with this name and PID +epprd_rg:process_resources[process_applications:318] TMP_FILE=/var/hacmp/log/.process_resources_applications.37028330 +epprd_rg:process_resources[process_applications:318] export TMP_FILE +epprd_rg:process_resources[process_applications:320] rm -f '/var/hacmp/log/.process_resources_applications*' +epprd_rg:process_resources[process_applications:322] WAITPIDS='' +epprd_rg:process_resources[process_applications:323] LPAR_ACQUIRE_FAILED=0 +epprd_rg:process_resources[process_applications:324] LPAR_RELEASE_FAILED=0 +epprd_rg:process_resources[process_applications:325] START_STOP_FAILED=0 +epprd_rg:process_resources[process_applications:326] LIST_OF_APPS=epprd_app +epprd_rg:process_resources[process_applications:329] : Acquire lpar resources in one-shot before starting applications +epprd_rg:process_resources[process_applications:331] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[process_applications:343] (( LPAR_ACQUIRE_FAILED == 0 )) +epprd_rg:process_resources[process_applications:345] : Loop through all groups to start or stop applications +epprd_rg:process_resources[process_applications:348] export GROUPNAME +epprd_rg:process_resources[process_applications:351] : Break out application data +epprd_rg:process_resources[process_applications:353] get_list_head epprd_app +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo epprd_app +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo epprd_app +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[process_applications:353] read 
LIST_OF_APPLICATIONS_FOR_RG +epprd_rg:process_resources[process_applications:354] get_list_tail epprd_app +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo epprd_app +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[process_applications:354] read ALL_APPLICATIONS +epprd_rg:process_resources[process_applications:356] get_list_head +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[get_list_head:62] echo +epprd_rg:process_resources[process_applications:356] read MISCDATA_FOR_RG +epprd_rg:process_resources[process_applications:357] get_list_tail +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[process_applications:357] read MISCDATA +epprd_rg:process_resources[process_applications:359] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[process_applications:363] TMPLIST='' +epprd_rg:process_resources[process_applications:364] print epprd_app +epprd_rg:process_resources[process_applications:364] set -A appnames epprd_app +epprd_rg:process_resources[process_applications:366] (( cnt=0)) +epprd_rg:process_resources[process_applications:366] (( cnt < 1)) +epprd_rg:process_resources[process_applications:367] TMPLIST='epprd_app ' +epprd_rg:process_resources[process_applications:368] LIST_OF_APPLICATIONS_FOR_RG=epprd_app +epprd_rg:process_resources[process_applications:366] ((cnt++ )) +epprd_rg:process_resources[process_applications:366] (( cnt < 1)) +epprd_rg:process_resources[process_applications:371] LIST_OF_APPLICATIONS_FOR_RG='epprd_app ' +epprd_rg:process_resources[process_applications:374] APPLICATIONS='epprd_app ' +epprd_rg:process_resources[process_applications:374] export APPLICATIONS +epprd_rg:process_resources[process_applications:375] MISC_DATA='' +epprd_rg:process_resources[process_applications:375] export MISC_DATA +epprd_rg:process_resources[process_applications:378] : Now call start_or_stop_applications_for_rg to do the app start/stop. 
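The trace above shows the core plumbing of process_resources: clRGPA returns a shell-evaluable job description, and the get_list_head/get_list_tail helpers unpack its nested lists (resource groups separated by colons, items within one group separated by commas). A minimal ksh sketch reduced from the calls traced here:

    # clRGPA's output is eval'ed with allexport on, so every variable
    # it defines (JOB_TYPE, ACTION, ALL_APPLICATIONS, ...) is exported.
    set -a
    eval $(clRGPA)      # e.g. JOB_TYPE=APPLICATIONS ACTION=RELEASE ...
    set +a

    # First colon-separated element, with commas turned into spaces.
    get_list_head()
    {
        typeset listhead listtail
        echo "$*" | IFS=: read listhead listtail
        echo "$listhead" | tr ',' ' '
    }

    # Everything after the first colon-separated element.
    get_list_tail()
    {
        typeset listhead listtail
        echo "$*" | IFS=: read listhead listtail
        echo "$listtail"
    }

As in the trace, callers pipe the result into read, e.g. get_list_head "$ALL_APPLICATIONS" | read LIST_OF_APPLICATIONS_FOR_RG.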
+epprd_rg:process_resources[process_applications:381] start_or_stop_applications_for_rg RELEASE /var/hacmp/log/.process_resources_applications.37028330.epprd_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:248] PS4_FUNC=start_or_stop_applications_for_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:248] typeset PS4_FUNC +epprd_rg:process_resources[start_or_stop_applications_for_rg:249] [[ high == high ]] +epprd_rg:process_resources[start_or_stop_applications_for_rg:249] set -x +epprd_rg:process_resources[start_or_stop_applications_for_rg:251] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[start_or_stop_applications_for_rg:255] cmd_to_execute=stop_server +epprd_rg:process_resources[start_or_stop_applications_for_rg:259] : File name to store our exit status +epprd_rg:process_resources[start_or_stop_applications_for_rg:261] STATUS_FILE=/var/hacmp/log/.process_resources_applications.37028330.epprd_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:264] : Use clcallev to run the event +epprd_rg:process_resources[start_or_stop_applications_for_rg:266] clcallev stop_server 'epprd_app ' +epprd_rg:process_resources[process_applications:384] : Add PID of the last bg start_or_stop_applications_for_rg process to WAITPIDS. +epprd_rg:process_resources[process_applications:386] WAITPIDS=' 25690622' +epprd_rg:process_resources[process_applications:390] : Wait for the start_or_stop_applications_for_rg PIDs to finish. +epprd_rg:process_resources[process_applications:393] wait 25690622 Sep 28 2023 15:55:46 EVENT START: stop_server epprd_app |2023-09-28T15:55:46|8608|EVENT START: stop_server epprd_app | +epprd_rg:stop_server[+59] version=%I% +epprd_rg:stop_server[+62] STATUS=0 +epprd_rg:stop_server[+66] [ ! -n ] +epprd_rg:stop_server[+68] EMULATE=REAL +epprd_rg:stop_server[+71] PROC_RES=false +epprd_rg:stop_server[+75] [[ APPLICATIONS != 0 ]] +epprd_rg:stop_server[+75] [[ APPLICATIONS != GROUP ]] +epprd_rg:stop_server[+76] PROC_RES=true +epprd_rg:stop_server[+79] typeset WPARNAME WPARDIR EXEC +epprd_rg:stop_server[+80] WPARDIR= +epprd_rg:stop_server[+81] EXEC= +epprd_rg:stop_server[+83] typeset -i rc=0 +epprd_rg:stop_server[+84] +epprd_rg:stop_server[+84] clwparname epprd_rg +epprd_rg:clwparname[38] version=1.3.1.1 +epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clwparname[44] [[ -z '' ]] +epprd_rg:clwparname[44] exit 0 WPARNAME= +epprd_rg:stop_server[+85] rc=0 +epprd_rg:stop_server[+87] set -u +epprd_rg:stop_server[+90] ALLSERVERS=All_servers +epprd_rg:stop_server[+91] [ REAL = EMUL ] +epprd_rg:stop_server[+96] cl_RMupdate resource_releasing All_servers stop_server 2023-09-28T15:55:46.391648 2023-09-28T15:55:46.396108 +epprd_rg:stop_server[+101] (( 0 == 0 )) +epprd_rg:stop_server[+101] [[ -n ]] +epprd_rg:stop_server[+120] +epprd_rg:stop_server[+120] cllsserv -cn epprd_app +epprd_rg:stop_server[+120] cut -d: -f3 STOP=/etc/hacmp/epprd_stop.sh +epprd_rg:stop_server[+121] +epprd_rg:stop_server[+121] cut -d -f1 +epprd_rg:stop_server[+121] echo /etc/hacmp/epprd_stop.sh STOP_SCRIPT=/etc/hacmp/epprd_stop.sh +epprd_rg:stop_server[+123] PATTERN=epprda epprd_app +epprd_rg:stop_server[+123] [[ -n ]] +epprd_rg:stop_server[+123] [[ -z ]] +epprd_rg:stop_server[+123] [[ -x /etc/hacmp/epprd_stop.sh ]] +epprd_rg:stop_server[+133] [ REAL = EMUL ] +epprd_rg:stop_server[+139] amlog_trace Stopping application controller|epprd_app +epprd_rg:stop_server[+55] clcycle clavailability.log +epprd_rg:stop_server[+55] 1> /dev/null 2>& 1 
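process_applications stops each group's controllers through a backgrounded start_or_stop_applications_for_rg worker, then waits on the collected PIDs and reads a one-line status file per resource group; with a single group, that is the wait on PID 25690622 seen here. A condensed sketch of the pattern (error accumulation simplified):

    # Fork one worker per RG; each writes "<rg> <status>" to its file.
    WAITPIDS=""
    for GROUPNAME in $RESOURCE_GROUPS
    do
        start_or_stop_applications_for_rg $ACTION $TMP_FILE.$GROUPNAME &
        WAITPIDS="$WAITPIDS $!"
    done
    wait $WAITPIDS

    # Harvest the per-RG results and clean up the status files.
    for GROUPNAME in $RESOURCE_GROUPS
    do
        cat $TMP_FILE.$GROUPNAME | read skip SUCCESS rest
        [[ $SUCCESS != 0 ]] && START_STOP_FAILED=1
        rm -f $TMP_FILE.$GROUPNAME
    done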
+epprd_rg:stop_server[+55] +epprd_rg:stop_server[+55] cltime DATE=2023-09-28T15:55:46.431999 +epprd_rg:stop_server[+55] echo |2023-09-28T15:55:46.431999|INFO: Stopping application controller|epprd_app +epprd_rg:stop_server[+55] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:stop_server[+140] /etc/hacmp/epprd_stop.sh +epprd_rg:stop_server[+140] ODMDIR=/etc/objrepos [ASCII-art banner from epprd_stop.sh omitted] Checking EPP Database ------------------------------------------- J2EE Database is not available via test See logfile /home/eppadm/JdbcCon.log stopping the SAP instance J00 Shutdown-Log is written to /home/eppadm/stopsap_J00.log ------------------------------------------- Instance J00 was not running! stopping the SAP instance SCS01 Shutdown-Log is written to /home/eppadm/stopsap_SCS01.log ------------------------------------------- Instance SCS01 was not running! stopping the SAP instance SMDA97 Shutdown-Log is written to /home/daaadm/stopsap_SMDA97.log ------------------------------------------- Instance SMDA97 was not running! LSNRCTL for IBM/AIX RISC System/6000: Version 11.2.0.3.0 - Production on 28-SEP-2023 15:55:57 Copyright (c) 1991, 2011, Oracle. All rights reserved. Connecting to (ADDRESS=(PROTOCOL=IPC)(KEY=EPP.WORLD)) TNS-12541: TNS:no listener TNS-12560: TNS:protocol adapter error TNS-00511: No listener IBM/AIX RISC System/6000 Error: 2: No such file or directory Connecting to (ADDRESS=(PROTOCOL=IPC)(KEY=EPP)) TNS-12541: TNS:no listener TNS-12560: TNS:protocol adapter error TNS-00511: No listener IBM/AIX RISC System/6000 Error: 2: No such file or directory Connecting to (ADDRESS=(COMMUNITY=SAP.WORLD)(PROTOCOL=TCP)(HOST=epprd)(PORT=1521)) TNS-12541: TNS:no listener TNS-12560: TNS:protocol adapter error TNS-00511: No listener IBM/AIX RISC System/6000 Error: 79: Connection refused +epprd_rg:sh[+1] kill -9 19661190 +epprd_rg:stop_server[+141] rc=0 +epprd_rg:stop_server[+143] (( rc != 0 )) +epprd_rg:stop_server[+151] amlog_trace Stopping application controller|epprd_app +epprd_rg:stop_server[+55] clcycle clavailability.log +epprd_rg:stop_server[+55] 1> /dev/null 2>& 1
'epprd_rg 0' +epprd_rg:process_resources[start_or_stop_applications_for_rg:293] 1>> /var/hacmp/log/.process_resources_applications.37028330.epprd_rg +epprd_rg:process_resources[process_applications:396] : Look at all the status files to see if any were unsuccessful +epprd_rg:process_resources[process_applications:399] cat /var/hacmp/log/.process_resources_applications.37028330.epprd_rg +epprd_rg:process_resources[process_applications:399] read skip SUCCESS rest +epprd_rg:process_resources[process_applications:401] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:411] rm -f /var/hacmp/log/.process_resources_applications.37028330.epprd_rg +epprd_rg:process_resources[process_applications:416] : Release lpar resources in one-shot now that applications are stopped +epprd_rg:process_resources[process_applications:418] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[process_applications:420] GROUPNAME=epprd_rg +epprd_rg:process_resources[process_applications:420] export GROUPNAME +epprd_rg:process_resources[process_applications:421] clmanageroha -o release -s -l epprd_app +epprd_rg:process_resources[process_applications:421] 3>& 2 +epprd_rg:clmanageroha[318] : version='@(#)' 5881272 43haes/usr/sbin/cluster/events/clmanageroha.sh, 61aha_r726, 2205A_aha726, May 16 2022 12:15 PM +epprd_rg:clmanageroha[321] clodmget -n -f connection_type HACMPhmcparam +epprd_rg:clmanageroha[321] CONN_TYPE=0 +epprd_rg:clmanageroha[321] typeset -i CONN_TYPE +epprd_rg:clmanageroha[323] clodmget -q name='epprda and object like POWERVS_*' -nf name HACMPnode +epprd_rg:clmanageroha[323] 2> /dev/null +epprd_rg:clmanageroha[323] [[ -n '' ]] +epprd_rg:clmanageroha[326] export CONN_TYPE +epprd_rg:clmanageroha[331] roha_session_open -o release -s -l epprd_app +epprd_rg:clmanageroha[roha_session_open:131] roha_session.id=25690372 +epprd_rg:clmanageroha[roha_session_open:132] date +epprd_rg:clmanageroha[roha_session_open:132] LC_ALL=C +epprd_rg:clmanageroha[roha_session_open:132] roha_session_log 'Open session 25690372 at Thu Sep 28 15:55:57 KORST 2023' [ROHALOG:25690372:(0.067)] Open session 25690372 at Thu Sep 28 15:55:57 KORST 2023 +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:146] roha_session.operation=release +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:143] roha_session.systemmirror_mode=1 +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:149] roha_session.optimal_apps=epprd_app +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:163] [[ release != @(acquire|release|adjust) ]] +epprd_rg:clmanageroha[roha_session_open:168] no_roha_apps=0 +epprd_rg:clmanageroha[roha_session_open:168] typeset -i no_roha_apps +epprd_rg:clmanageroha[roha_session_open:169] need_explicit_res_rel=0 +epprd_rg:clmanageroha[roha_session_open:169] typeset -i need_explicit_res_rel +epprd_rg:clmanageroha[roha_session_open:187] [[ -n epprd_app ]] +epprd_rg:clmanageroha[roha_session_open:187] clmgr q roha +epprd_rg:clmanageroha[roha_session_open:187] sort +epprd_rg:clmanageroha[roha_session_open:187] uniq -d +epprd_rg:clmanageroha[roha_session_open:187] sort -u +epprd_rg:clmanageroha[roha_session_open:187] echo epprd_app +epprd_rg:clmanageroha[roha_session_open:187] echo '\nepprd_app' +epprd_rg:clmanageroha[roha_session_open:187] [[ -z '' ]] +epprd_rg:clmanageroha[roha_session_open:189] roha_session_log 
'INFO: No ROHA configured on applications.\n' [ROHALOG:25690372:(0.463)] INFO: No ROHA configured on applications. [ROHALOG:25690372:(0.463)] +epprd_rg:clmanageroha[roha_session_open:190] no_roha_apps=1 +epprd_rg:clmanageroha[roha_session_open:195] read_tunables +epprd_rg:clmanageroha[roha_session_open:196] echo '' +epprd_rg:clmanageroha[roha_session_open:196] grep -q epprda +epprd_rg:clmanageroha[roha_session_open:197] (( 1 == 0 )) +epprd_rg:clmanageroha[roha_session_open:202] (( 1 == 1 )) +epprd_rg:clmanageroha[roha_session_open:203] roha_session_read_odm_dynresop DLPAR_MEM +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_MEM -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:203] (( 0 == 0.00 )) +epprd_rg:clmanageroha[roha_session_open:204] roha_session_read_odm_dynresop DLPAR_PROCS +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROCS -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:204] (( 0 == 0 )) +epprd_rg:clmanageroha[roha_session_open:205] roha_session_read_odm_dynresop DLPAR_PROC_UNITS +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROC_UNITS -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:205] (( 0 == 0.00 )) +epprd_rg:clmanageroha[roha_session_open:206] roha_session_log 'INFO: Nothing to be done.\n' [ROHALOG:25690372:(0.516)] INFO: Nothing to be done. 
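roha_session_open concludes "Nothing to be done" in two steps: none of the applications passed with -l appear in clmgr's list of ROHA-enabled controllers, and the DLPAR counters persisted in HACMPdynresop are all zero, so there is nothing to give back. A condensed sketch of both checks (the real script interleaves them with tunable reads):

    # Intersection test: merge the ROHA app list with our apps and look
    # for duplicates; an empty result means no ROHA anywhere.
    common=$(print -- "$(clmgr q roha)\n$optimal_apps" | sort | uniq -d)
    [[ -z $common ]] && no_roha_apps=1

    # Persisted DLPAR value for one key, defaulting to 0 when unset.
    roha_session_read_odm_dynresop()
    {
        typeset out=$(ODMDIR=/etc/es/objrepos \
            clodmget -q key=$1 -nf value HACMPdynresop)
        print -- ${out:-0}
    }

    if (( no_roha_apps == 1 )) &&
       (( $(roha_session_read_odm_dynresop DLPAR_MEM) == 0 )) &&
       (( $(roha_session_read_odm_dynresop DLPAR_PROCS) == 0 )) &&
       (( $(roha_session_read_odm_dynresop DLPAR_PROC_UNITS) == 0 ))
    then
        roha_session_log 'INFO: Nothing to be done.\n'
        exit 0
    fi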
[ROHALOG:25690372:(0.516)] +epprd_rg:clmanageroha[roha_session_open:207] exit 0 +epprd_rg:process_resources[process_applications:422] RC=0 +epprd_rg:process_resources[process_applications:423] (( 0 != 0 )) +epprd_rg:process_resources[process_applications:433] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:434] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:435] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:439] return 0 +epprd_rg:process_resources[3550] RC=0 +epprd_rg:process_resources[3551] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3553] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-09-28T15:55:58.054229 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=MOUNT_FILESYSTEMS ACTION=RELEASE FILE_SYSTEMS='"/board;/board_org"' RESOURCE_GROUPS='"epprd_rg' '"' NFS_NETWORKS='""' NFS_HOSTS='""' IP_LABELS='""' +epprd_rg:process_resources[1] JOB_TYPE=MOUNT_FILESYSTEMS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] FILE_SYSTEMS='/board;/board_org' +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] NFS_NETWORKS='' +epprd_rg:process_resources[1] NFS_HOSTS='' +epprd_rg:process_resources[1] IP_LABELS='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ MOUNT_FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources[3360] [[ MOUNT_FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources[3612] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[3616] unmount_nfs_filesystems +epprd_rg:process_resources[unmount_nfs_filesystems:1397] PS4_FUNC=unmount_nfs_filesystems +epprd_rg:process_resources[unmount_nfs_filesystems:1397] typeset PS4_FUNC +epprd_rg:process_resources[unmount_nfs_filesystems:1398] [[ high == high ]] +epprd_rg:process_resources[unmount_nfs_filesystems:1398] set -x +epprd_rg:process_resources[unmount_nfs_filesystems:1400] STAT=0 +epprd_rg:process_resources[unmount_nfs_filesystems:1402] cl_deactivate_nfs +epprd_rg:cl_deactivate_nfs[+75] [[ high == high ]] +epprd_rg:cl_deactivate_nfs[+75] version=1.2.5.1 $Source$ +epprd_rg:cl_deactivate_nfs[+77] STATUS=0 +epprd_rg:cl_deactivate_nfs[+78] PIDLIST= +epprd_rg:cl_deactivate_nfs[+80] set -u +epprd_rg:cl_deactivate_nfs[+154] PROC_RES=false +epprd_rg:cl_deactivate_nfs[+158] [[ MOUNT_FILESYSTEMS != 0 ]] +epprd_rg:cl_deactivate_nfs[+158] [[ MOUNT_FILESYSTEMS != GROUP ]] +epprd_rg:cl_deactivate_nfs[+159] PROC_RES=true +epprd_rg:cl_deactivate_nfs[+175] export GROUPNAME +epprd_rg:cl_deactivate_nfs[+175] [[ true == true ]] +epprd_rg:cl_deactivate_nfs[+178] read UNSORTED_FILELIST +epprd_rg:cl_deactivate_nfs[+178] get_list_head /board;/board_org +epprd_rg:cl_deactivate_nfs[+179] read FILE_SYSTEMS +epprd_rg:cl_deactivate_nfs[+179] get_list_tail 
/board;/board_org +epprd_rg:cl_deactivate_nfs[+186] +epprd_rg:cl_deactivate_nfs[+186] /bin/sort -r +epprd_rg:cl_deactivate_nfs[+186] /bin/echo /board;/board_org FILELIST=/board;/board_org +epprd_rg:cl_deactivate_nfs[+188] grep -q \;/ +epprd_rg:cl_deactivate_nfs[+188] echo /board;/board_org +epprd_rg:cl_deactivate_nfs[+189] CROSSMOUNT=1 +epprd_rg:cl_deactivate_nfs[+189] [[ 1 != 0 ]] +epprd_rg:cl_deactivate_nfs[+194] +epprd_rg:cl_deactivate_nfs[+194] /bin/sort -k 1,1r -t; +epprd_rg:cl_deactivate_nfs[+194] /bin/echo /board;/board_org MNT=/board;/board_org +epprd_rg:cl_deactivate_nfs[+200] ALLNFS=All_nfs_mounts +epprd_rg:cl_deactivate_nfs[+201] cl_RMupdate resource_releasing All_nfs_mounts cl_deactivate_nfs 2023-09-28T15:55:58.104950 2023-09-28T15:55:58.109468 +epprd_rg:cl_deactivate_nfs[+203] +epprd_rg:cl_deactivate_nfs[+203] odmget -q name=RECOVERY_METHOD AND group=epprd_rg HACMPresource +epprd_rg:cl_deactivate_nfs[+203] grep value +epprd_rg:cl_deactivate_nfs[+203] sed s/"//g +epprd_rg:cl_deactivate_nfs[+203] awk {print $3} METHOD=sequential +epprd_rg:cl_deactivate_nfs[+206] typeset PS4_LOOP=/board;/board_org +epprd_rg:cl_deactivate_nfs:/board;/board_org[+207] (( 1 != 0 )) +epprd_rg:cl_deactivate_nfs:/board;/board_org[+209] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+209] cut -f2 -d; +epprd_rg:cl_deactivate_nfs:/board;/board_org[+209] echo /board;/board_org fs=/board_org +epprd_rg:cl_deactivate_nfs:/board;/board_org[+210] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+210] echo /board;/board_org +epprd_rg:cl_deactivate_nfs:/board;/board_org[+210] cut -f1 -d; mnt=/board +epprd_rg:cl_deactivate_nfs:/board;/board_org[+220] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+220] awk -v MFS=/board BEGIN {MFS=sprintf("^%s$", MFS)} \ match($4, "nfs") && match($3, MFS) {print $2} +epprd_rg:cl_deactivate_nfs:/board;/board_org[+220] mount f=/board_org +epprd_rg:cl_deactivate_nfs:/board;/board_org[+220] [[ /board_org == /board_org ]] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+227] pid= +epprd_rg:cl_deactivate_nfs:/board;/board_org[+227] [[ sequential == sequential ]] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+227] [[ rg_move == node_down ]] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+227] [[ rg_move == rg_move ]] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+252] pid=20775220 +epprd_rg:cl_deactivate_nfs:/board;/board_org[+252] [[ -n 20775220 ]] +epprd_rg:cl_deactivate_nfs:/board;/board_org[+251] do_umount /board +epprd_rg:cl_deactivate_nfs(0):/board;/board_org[do_umount+4] typeset fs=/board +epprd_rg:cl_deactivate_nfs(0):/board;/board_org[do_umount+31] cl_nfskill -k -u /board +epprd_rg:cl_deactivate_nfs:/board;/board_org[+264] echo +epprd_rg:cl_deactivate_nfs:/board;/board_org[+264] grep -qw 20775220 +epprd_rg:cl_deactivate_nfs(0):/board;/board_org[do_umount+33] sleep 2 +epprd_rg:cl_deactivate_nfs:/board;/board_org[+267] (( 1 != 0 )) +epprd_rg:cl_deactivate_nfs:/board;/board_org[+268] PIDLIST= 20775220 +epprd_rg:cl_deactivate_nfs:/board;/board_org[+274] unset PS4_LOOP +epprd_rg:cl_deactivate_nfs[+279] wait 20775220 +epprd_rg:cl_deactivate_nfs(2):/board;/board_org[do_umount+34] cl_nfskill -k -u /board +epprd_rg:cl_deactivate_nfs(2):/board;/board_org[do_umount+36] sleep 2 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+39] amlog_trace Deactivating NFS|/board +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] clcycle clavailability.log +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] 1> /dev/null 2>& 1 
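The semicolon spotted by the grep above marks an IP crossmount: each FILELIST entry is a "localmount;exportedfs" pair, and only the local side is unmounted, via a backgrounded do_umount whose PID is collected and waited on. A reduced sketch of that loop and of do_umount's retry logic as traced here:

    # Split each "local;exported" pair and release the local NFS mount.
    PIDLIST=""
    for pair in $MNT                        # e.g. /board;/board_org
    do
        fs=$(echo $pair | cut -f2 -d';')    # exported side: /board_org
        mnt=$(echo $pair | cut -f1 -d';')   # local side:    /board
        do_umount $mnt &
        PIDLIST="$PIDLIST $!"
    done
    wait $PIDLIST                           # METHOD=sequential: wait here

    # do_umount, as traced: kill users of the mount twice, then try a
    # forced unmount up to 20 times, two seconds apart.
    do_umount()
    {
        typeset fs=$1
        cl_nfskill -k -u $fs ; sleep 2
        cl_nfskill -k -u $fs ; sleep 2
        typeset -i COUNT=20
        while true
        do
            umount -f $fs && break          # "forced unmount of /board"
            (( COUNT -= 1 ))
            (( COUNT <= 0 )) && return 1
            sleep 2
        done
        return 0
    }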
+epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] cltime DATE=2023-09-28T15:56:02.161105 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] echo |2023-09-28T15:56:02.161105|INFO: Deactivating NFS|/board +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+40] typeset COUNT=20 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+41] true +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+42] date +%h %d %H:%M:%S.000 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+42] : Attempt 1 of 20 to unmount at Sep 28 15:56:02.000 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+43] umount -f /board forced unmount of /board +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+44] (( 0 != 0 )) +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+61] amlog_trace Deactivating NFS|/board +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] clcycle clavailability.log +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] cltime DATE=2023-09-28T15:56:02.264804 +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] echo |2023-09-28T15:56:02.264804|INFO: Deactivating NFS|/board +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+49] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+62] break +epprd_rg:cl_deactivate_nfs(4):/board;/board_org[do_umount+65] return 0 +epprd_rg:cl_deactivate_nfs[+280] (( 0 != 0 )) +epprd_rg:cl_deactivate_nfs[+291] ALLNOERRNFS=All_nonerror_nfs_mounts +epprd_rg:cl_deactivate_nfs[+292] cl_RMupdate resource_down All_nonerror_nfs_mounts cl_deactivate_nfs 2023-09-28T15:56:02.288815 2023-09-28T15:56:02.293227 +epprd_rg:cl_deactivate_nfs[+295] exit 0 +epprd_rg:process_resources[unmount_nfs_filesystems:1403] RC=0 +epprd_rg:process_resources[unmount_nfs_filesystems:1406] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[unmount_nfs_filesystems:1420] (( 0 != 0 )) +epprd_rg:process_resources[unmount_nfs_filesystems:1426] return 0 +epprd_rg:process_resources[3617] RC=0 +epprd_rg:process_resources[3618] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3620] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-09-28T15:56:02.306387 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=EXPORT_FILESYSTEMS ACTION=RELEASE EXPORT_FILE_SYSTEMS='"/board_org,/sapmnt/EPP"' EXPORT_FILE_SYSTEMS_V4='""' RESOURCE_GROUPS='"epprd_rg' '"' STABLE_STORAGE_PATH='""' IP_LABELS='""' DAEMONS='"NFS' '"' +epprd_rg:process_resources[1] JOB_TYPE=EXPORT_FILESYSTEMS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] EXPORT_FILE_SYSTEMS=/board_org,/sapmnt/EPP +epprd_rg:process_resources[1] EXPORT_FILE_SYSTEMS_V4='' +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] 
STABLE_STORAGE_PATH='' +epprd_rg:process_resources[1] IP_LABELS='' +epprd_rg:process_resources[1] DAEMONS='NFS ' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ EXPORT_FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources[3360] [[ EXPORT_FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources[3595] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[3599] unexport_filesystems +epprd_rg:process_resources[unexport_filesystems:1576] PS4_FUNC=unexport_filesystems +epprd_rg:process_resources[unexport_filesystems:1576] typeset PS4_FUNC +epprd_rg:process_resources[unexport_filesystems:1577] [[ high == high ]] +epprd_rg:process_resources[unexport_filesystems:1577] set -x +epprd_rg:process_resources[unexport_filesystems:1578] STAT=0 +epprd_rg:process_resources[unexport_filesystems:1579] NFSSTOPPED=0 +epprd_rg:process_resources[unexport_filesystems:1580] RPCSTOPPED=0 +epprd_rg:process_resources[unexport_filesystems:1582] export NFSSTOPPED +epprd_rg:process_resources[unexport_filesystems:1585] : For NFSv4, cl_unexport_fs will use STABLE_STORAGE_PATH, which is set by +epprd_rg:process_resources[unexport_filesystems:1586] : clRGPA and can have colon-separated values for multiple RGs. +epprd_rg:process_resources[unexport_filesystems:1587] : We will save off clRGPA values in stable_storage_path and then extract +epprd_rg:process_resources[unexport_filesystems:1588] : each RG into STABLE_STORAGE_PATH for cl_unexport_fs. 
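The comment block just traced explains the convention: clRGPA delivers STABLE_STORAGE_PATH as colon-separated values covering every resource group, so unexport_filesystems stashes the full string and peels off one group's slice per iteration before calling cl_unexport_fs. A sketch of that loop, reduced from the reads that follow:

    # Save clRGPA's multi-RG value, then extract one RG's slice per pass.
    stable_storage_path="$STABLE_STORAGE_PATH"
    for GROUPNAME in $RESOURCE_GROUPS
    do
        get_list_head "$EXPORT_FILE_SYSTEMS" | read LIST_OF_EXPORT_FILE_SYSTEMS_FOR_RG
        get_list_tail "$EXPORT_FILE_SYSTEMS" | read EXPORT_FILE_SYSTEMS
        get_list_head "$stable_storage_path" | read STABLE_STORAGE_PATH
        get_list_tail "$stable_storage_path" | read stable_storage_path
        cl_unexport_fs "$LIST_OF_EXPORT_FILE_SYSTEMS_FOR_RG" "$LIST_OF_EXPORT_FILE_SYSTEMS_V4_FOR_RG"
    done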
+epprd_rg:process_resources[unexport_filesystems:1590] stable_storage_path='' +epprd_rg:process_resources[unexport_filesystems:1590] typeset stable_storage_path +epprd_rg:process_resources[unexport_filesystems:1594] export GROUPNAME +epprd_rg:process_resources[unexport_filesystems:1596] get_list_head /board_org,/sapmnt/EPP +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[unexport_filesystems:1596] read LIST_OF_EXPORT_FILE_SYSTEMS_FOR_RG +epprd_rg:process_resources[unexport_filesystems:1597] get_list_tail /board_org,/sapmnt/EPP +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[unexport_filesystems:1597] read EXPORT_FILE_SYSTEMS +epprd_rg:process_resources[unexport_filesystems:1599] get_list_head +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[unexport_filesystems:1599] read LIST_OF_EXPORT_FILE_SYSTEMS_V4_FOR_RG +epprd_rg:process_resources[unexport_filesystems:1600] get_list_tail +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[unexport_filesystems:1600] read EXPORT_FILE_SYSTEMS_V4 +epprd_rg:process_resources[unexport_filesystems:1601] get_list_head +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo 
+epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[unexport_filesystems:1601] read STABLE_STORAGE_PATH +epprd_rg:process_resources[unexport_filesystems:1602] get_list_tail +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[unexport_filesystems:1602] read stable_storage_path +epprd_rg:process_resources[unexport_filesystems:1604] cl_unexport_fs '/board_org /sapmnt/EPP' '' +epprd_rg:cl_unexport_fs[136] version=%I% +epprd_rg:cl_unexport_fs[139] . /usr/es/sbin/cluster/events/utils/cl_nfs_utils +epprd_rg:cl_unexport_fs[98] PROGNAME=cl_unexport_fs +epprd_rg:cl_unexport_fs[99] [[ high == high ]] +epprd_rg:cl_unexport_fs[101] set -x +epprd_rg:cl_unexport_fs[102] version=%I +epprd_rg:cl_unexport_fs[105] cl_exports_data='' +epprd_rg:cl_unexport_fs[105] typeset cl_exports_data +epprd_rg:cl_unexport_fs[106] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_unexport_fs[141] UNEXPORT_V3='/board_org /sapmnt/EPP' +epprd_rg:cl_unexport_fs[142] UNEXPORT_V4='' +epprd_rg:cl_unexport_fs[144] STATUS=0 +epprd_rg:cl_unexport_fs[146] PROC_RES=false +epprd_rg:cl_unexport_fs[150] [[ EXPORT_FILESYSTEMS != 0 ]] +epprd_rg:cl_unexport_fs[150] [[ EXPORT_FILESYSTEMS != GROUP ]] +epprd_rg:cl_unexport_fs[151] PROC_RES=true +epprd_rg:cl_unexport_fs[154] set -u +epprd_rg:cl_unexport_fs[156] (( 2 != 2 )) +epprd_rg:cl_unexport_fs[162] [[ __AIX__ == __AIX__ ]] +epprd_rg:cl_unexport_fs[164] oslevel -r +epprd_rg:cl_unexport_fs[164] cut -c1-2 +epprd_rg:cl_unexport_fs[164] (( 72 > 52 )) +epprd_rg:cl_unexport_fs[166] FORCE=-F +epprd_rg:cl_unexport_fs[180] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_unexport_fs[181] DARE_EVENT=reconfig_resource_release +epprd_rg:cl_unexport_fs[184] unexport_v4='' +epprd_rg:cl_unexport_fs[185] [[ -z '' ]] +epprd_rg:cl_unexport_fs[185] [[ rg_move == reconfig_resource_release ]] +epprd_rg:cl_unexport_fs[196] [[ -z '' ]] +epprd_rg:cl_unexport_fs[196] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_unexport_fs[198] unexport_v3='' +epprd_rg:cl_unexport_fs[204] getline_exports /board_org +epprd_rg:cl_unexport_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_unexport_fs[getline_exports:45] line='' +epprd_rg:cl_unexport_fs[getline_exports:45] typeset line +epprd_rg:cl_unexport_fs[getline_exports:46] flag=0 +epprd_rg:cl_unexport_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_unexport_fs[getline_exports:47] fs=/board_org +epprd_rg:cl_unexport_fs[getline_exports:49] [[ -z /board_org ]] +epprd_rg:cl_unexport_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_unexport_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_unexport_fs[getline_exports:56] read -r line +epprd_rg:cl_unexport_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] 
+epprd_rg:cl_unexport_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_unexport_fs[getline_exports:69] (( 1 == 0 )) +epprd_rg:cl_unexport_fs[getline_exports:74] [[ 0 == 1 ]] +epprd_rg:cl_unexport_fs[getline_exports:56] read -r line +epprd_rg:cl_unexport_fs[getline_exports:59] line='/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_unexport_fs[getline_exports:60] line='/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_unexport_fs[getline_exports:63] [[ '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' == #* ]] +epprd_rg:cl_unexport_fs[getline_exports:68] echo '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_unexport_fs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_unexport_fs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_unexport_fs[getline_exports:71] flag=1 +epprd_rg:cl_unexport_fs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_unexport_fs[getline_exports:76] echo '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_unexport_fs[getline_exports:76] grep -w '\\' +epprd_rg:cl_unexport_fs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_unexport_fs[getline_exports:81] cl_exports_data=' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_unexport_fs[getline_exports:82] break +epprd_rg:cl_unexport_fs[getline_exports:89] return 0 +epprd_rg:cl_unexport_fs[205] export_line=' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_unexport_fs[210] echo /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds +epprd_rg:cl_unexport_fs[210] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_unexport_fs[211] cut -d- -f2- +epprd_rg:cl_unexport_fs[211] tr , ' ' +epprd_rg:cl_unexport_fs[210] options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap:epprd:epprda:epprds root=epprdap:epprd:epprda:epprds ' +epprd_rg:cl_unexport_fs[217] vers_missing=1 +epprd_rg:cl_unexport_fs[240] (( vers_missing )) +epprd_rg:cl_unexport_fs[240] unexport_v3=' /board_org' +epprd_rg:cl_unexport_fs[204] getline_exports /sapmnt/EPP +epprd_rg:cl_unexport_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_unexport_fs[getline_exports:45] line='' +epprd_rg:cl_unexport_fs[getline_exports:45] typeset line +epprd_rg:cl_unexport_fs[getline_exports:46] flag=0 +epprd_rg:cl_unexport_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_unexport_fs[getline_exports:47] fs=/sapmnt/EPP +epprd_rg:cl_unexport_fs[getline_exports:49] [[ -z /sapmnt/EPP ]] +epprd_rg:cl_unexport_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_unexport_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_unexport_fs[getline_exports:56] read -r line +epprd_rg:cl_unexport_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:60] line='/sapmnt/EPP 
-sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_unexport_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:68] grep -q '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_unexport_fs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_unexport_fs[getline_exports:71] flag=1 +epprd_rg:cl_unexport_fs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_unexport_fs[getline_exports:76] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:76] grep -w '\\' +epprd_rg:cl_unexport_fs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_unexport_fs[getline_exports:81] cl_exports_data=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[getline_exports:82] break +epprd_rg:cl_unexport_fs[getline_exports:89] return 0 +epprd_rg:cl_unexport_fs[205] export_line=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[210] echo /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_unexport_fs[210] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_unexport_fs[211] cut -d- -f2- +epprd_rg:cl_unexport_fs[211] tr , ' ' +epprd_rg:cl_unexport_fs[210] options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap root=epprdap ' +epprd_rg:cl_unexport_fs[217] vers_missing=1 +epprd_rg:cl_unexport_fs[240] (( vers_missing )) +epprd_rg:cl_unexport_fs[240] unexport_v3=' /board_org /sapmnt/EPP' +epprd_rg:cl_unexport_fs[243] UNEXPORT_V3=' /board_org /sapmnt/EPP' +epprd_rg:cl_unexport_fs[244] UNEXPORT_V4='' +epprd_rg:cl_unexport_fs[247] hasrv='' +epprd_rg:cl_unexport_fs[249] [[ -z '' ]] +epprd_rg:cl_unexport_fs[251] query=name='STABLE_STORAGE_PATH AND group=epprd_rg' +epprd_rg:cl_unexport_fs[252] odmget -q name='STABLE_STORAGE_PATH AND group=epprd_rg' HACMPresource +epprd_rg:cl_unexport_fs[252] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_unexport_fs[252] STABLE_STORAGE_PATH='' +epprd_rg:cl_unexport_fs[256] [[ -z '' ]] +epprd_rg:cl_unexport_fs[258] STABLE_STORAGE_PATH=/var/adm/nfsv4.hacmp/epprd_rg +epprd_rg:cl_unexport_fs[261] [[ -z '' ]] +epprd_rg:cl_unexport_fs[263] query=name='SERVICE_LABEL AND group=epprd_rg' +epprd_rg:cl_unexport_fs[264] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_unexport_fs[264] odmget -q name='SERVICE_LABEL AND group=epprd_rg' HACMPresource +epprd_rg:cl_unexport_fs[264] SERVICE_LABEL=epprd +epprd_rg:cl_unexport_fs[268] ps -eo args +epprd_rg:cl_unexport_fs[268] grep -w nfsd +epprd_rg:cl_unexport_fs[268] grep -qw -- '-gp on' +epprd_rg:cl_unexport_fs[272] gp=off +epprd_rg:cl_unexport_fs[275] /usr/sbin/bootinfo -K +epprd_rg:cl_unexport_fs[275] KERNEL_BITS=64 +epprd_rg:cl_unexport_fs[277] [[ off == on ]] +epprd_rg:cl_unexport_fs[282] NFSv4_REGISTERED=0 +epprd_rg:cl_unexport_fs[286] V3=:2:3 +epprd_rg:cl_unexport_fs[287] V4=:4 +epprd_rg:cl_unexport_fs[289] [[ rg_move != reconfig_resource_release ]] +epprd_rg:cl_unexport_fs[290] [[ rg_move != release_vg_fs ]] +epprd_rg:cl_unexport_fs[298] [[ -n '' ]] +epprd_rg:cl_unexport_fs[321] V3='' +epprd_rg:cl_unexport_fs[322] V4='' +epprd_rg:cl_unexport_fs[326] ALLEXPORTS=All_exports +epprd_rg:cl_unexport_fs[328] cl_RMupdate resource_releasing All_exports cl_unexport_fs 2023-09-28T15:56:02.576988 2023-09-28T15:56:02.581461 
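getline_exports, traced twice above, pulls one filesystem's entry out of /usr/es/sbin/cluster/etc/exports; the loop that follows then reads the live options from exportfs and drops each export outright. A compressed sketch of both halves (backslash continuation-line handling omitted):

    # Locate the exports-file line for $1, skipping comment lines.
    getline_exports()
    {
        typeset fs=$1 line
        cl_exports_data=""
        [[ -r $EXPFILE ]] || return 1
        cat $EXPFILE | while read -r line
        do
            [[ $line == \#* ]] && continue
            if echo "$line" | grep -q "^[[:space:]]*${fs}[[:space:]]"
            then
                cl_exports_data=" $line"
                break
            fi
        done
    }

    # Unexport each filesystem: -u removes the export, -i bypasses
    # /etc/exports, and -F is the force flag chosen for AIX > 5.2.
    for fs in $FILESYSTEM_LIST
    do
        exportfs | grep -q "^[[:space:]]*${fs}[[:space:]]" || continue
        exportfs -i -u $FORCE $fs || STATUS=1
    done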
+epprd_rg:cl_unexport_fs[330] echo /board_org /sapmnt/EPP +epprd_rg:cl_unexport_fs[330] tr ' ' '\n' +epprd_rg:cl_unexport_fs[330] sort +epprd_rg:cl_unexport_fs[330] FILESYSTEM_LIST=$'/board_org\n/sapmnt/EPP' +epprd_rg:cl_unexport_fs[334] v3='' +epprd_rg:cl_unexport_fs[335] v4='' +epprd_rg:cl_unexport_fs[336] root='' +epprd_rg:cl_unexport_fs[337] old_options='' +epprd_rg:cl_unexport_fs[338] new_options='' +epprd_rg:cl_unexport_fs[340] exportfs +epprd_rg:cl_unexport_fs[340] grep '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_unexport_fs[340] export_line='/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_unexport_fs[342] [[ -z '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' ]] +epprd_rg:cl_unexport_fs[344] echo /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds +epprd_rg:cl_unexport_fs[344] cut -d- -f2- +epprd_rg:cl_unexport_fs[344] cut '-d ' -f2- +epprd_rg:cl_unexport_fs[344] tr , ' ' +epprd_rg:cl_unexport_fs[344] old_options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap:epprd:epprda:epprds root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_unexport_fs[365] new_options=,sec=sys:krb5p:krb5i:krb5:dh +epprd_rg:cl_unexport_fs[365] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw +epprd_rg:cl_unexport_fs[365] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds +epprd_rg:cl_unexport_fs[365] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds +epprd_rg:cl_unexport_fs[371] [[ -z '' ]] +epprd_rg:cl_unexport_fs[371] v3='' +epprd_rg:cl_unexport_fs[377] NFS_VER3='' +epprd_rg:cl_unexport_fs[380] [[ /board_org == /board_org ]] +epprd_rg:cl_unexport_fs[380] v3='' +epprd_rg:cl_unexport_fs[380] NFS_VER3=3 +epprd_rg:cl_unexport_fs[380] break +epprd_rg:cl_unexport_fs[382] NFS_VER4='' +epprd_rg:cl_unexport_fs[387] [[ '' == 4 ]] +epprd_rg:cl_unexport_fs[400] echo '' +epprd_rg:cl_unexport_fs[400] cut -d: -f2- +epprd_rg:cl_unexport_fs[400] vers='' +epprd_rg:cl_unexport_fs[402] [[ -z '' ]] +epprd_rg:cl_unexport_fs[404] [[ '' == 4 ]] +epprd_rg:cl_unexport_fs[408] exportfs -i -u -F /board_org exportfs: unexported /board_org +epprd_rg:cl_unexport_fs[410] (( 0 != 0 )) +epprd_rg:cl_unexport_fs[417] continue +epprd_rg:cl_unexport_fs[334] v3='' +epprd_rg:cl_unexport_fs[335] v4='' +epprd_rg:cl_unexport_fs[336] root='' +epprd_rg:cl_unexport_fs[337] old_options='' +epprd_rg:cl_unexport_fs[338] new_options='' +epprd_rg:cl_unexport_fs[340] exportfs +epprd_rg:cl_unexport_fs[340] grep '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_unexport_fs[340] export_line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_unexport_fs[342] [[ -z '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' ]] +epprd_rg:cl_unexport_fs[344] echo /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_unexport_fs[344] cut '-d ' -f2- +epprd_rg:cl_unexport_fs[344] cut -d- -f2- +epprd_rg:cl_unexport_fs[344] tr , ' ' +epprd_rg:cl_unexport_fs[344] old_options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap root=epprdap' +epprd_rg:cl_unexport_fs[365] new_options=,sec=sys:krb5p:krb5i:krb5:dh +epprd_rg:cl_unexport_fs[365] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw +epprd_rg:cl_unexport_fs[365] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap +epprd_rg:cl_unexport_fs[365] 
new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_unexport_fs[371] [[ -z '' ]] +epprd_rg:cl_unexport_fs[371] v3='' +epprd_rg:cl_unexport_fs[377] NFS_VER3='' +epprd_rg:cl_unexport_fs[380] [[ /sapmnt/EPP == /board_org ]] +epprd_rg:cl_unexport_fs[380] [[ /sapmnt/EPP == /sapmnt/EPP ]] +epprd_rg:cl_unexport_fs[380] v3='' +epprd_rg:cl_unexport_fs[380] NFS_VER3=3 +epprd_rg:cl_unexport_fs[380] break +epprd_rg:cl_unexport_fs[382] NFS_VER4='' +epprd_rg:cl_unexport_fs[387] [[ '' == 4 ]] +epprd_rg:cl_unexport_fs[400] echo '' +epprd_rg:cl_unexport_fs[400] cut -d: -f2- +epprd_rg:cl_unexport_fs[400] vers='' +epprd_rg:cl_unexport_fs[402] [[ -z '' ]] +epprd_rg:cl_unexport_fs[404] [[ '' == 4 ]] +epprd_rg:cl_unexport_fs[408] exportfs -i -u -F /sapmnt/EPP exportfs: unexported /sapmnt/EPP +epprd_rg:cl_unexport_fs[410] (( 0 != 0 )) +epprd_rg:cl_unexport_fs[417] continue +epprd_rg:cl_unexport_fs[452] [[ -n '' ]] +epprd_rg:cl_unexport_fs[480] ALLNOERREXPORT=All_nonerror_exports +epprd_rg:cl_unexport_fs[482] cl_RMupdate resource_down All_nonerror_exports cl_unexport_fs 2023-09-28T15:56:02.667163 2023-09-28T15:56:02.671659 +epprd_rg:cl_unexport_fs[484] exit 0 +epprd_rg:process_resources[unexport_filesystems:1608] return 0 +epprd_rg:process_resources[3600] RC=0 +epprd_rg:process_resources[3601] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3603] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-09-28T15:56:02.685001 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=FILESYSTEMS ACTION=RELEASE FILE_SYSTEMS='"/usr/sap,/sapmnt,/oracle/EPP/sapdata4,/oracle/EPP/sapdata3,/oracle/EPP/sapdata2,/oracle/EPP/sapdata1,/oracle/EPP/origlogB,/oracle/EPP/origlogA,/oracle/EPP/oraarch,/oracle/EPP/mirrlogB,/oracle/EPP/mirrlogA,/oracle/EPP,/oracle,/board_org"' RESOURCE_GROUPS='"epprd_rg' '"' FSCHECK_TOOLS='""' RECOVERY_METHODS='"sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential"' +epprd_rg:process_resources[1] JOB_TYPE=FILESYSTEMS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] FILE_SYSTEMS=/usr/sap,/sapmnt,/oracle/EPP/sapdata4,/oracle/EPP/sapdata3,/oracle/EPP/sapdata2,/oracle/EPP/sapdata1,/oracle/EPP/origlogB,/oracle/EPP/origlogA,/oracle/EPP/oraarch,/oracle/EPP/mirrlogB,/oracle/EPP/mirrlogA,/oracle/EPP,/oracle,/board_org +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] FSCHECK_TOOLS='' +epprd_rg:process_resources[1] RECOVERY_METHODS=sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ 
FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources[3360] [[ FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources[3482] process_file_systems RELEASE +epprd_rg:process_resources[process_file_systems:2640] PS4_FUNC=process_file_systems +epprd_rg:process_resources[process_file_systems:2640] typeset PS4_FUNC +epprd_rg:process_resources[process_file_systems:2641] [[ high == high ]] +epprd_rg:process_resources[process_file_systems:2641] set -x +epprd_rg:process_resources[process_file_systems:2643] STAT=0 +epprd_rg:process_resources[process_file_systems:2645] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[process_file_systems:2667] cl_deactivate_fs +epprd_rg:cl_deactivate_fs[860] version=1.6 +epprd_rg:cl_deactivate_fs[863] STATUS=0 +epprd_rg:cl_deactivate_fs[863] typeset -li STATUS +epprd_rg:cl_deactivate_fs[864] SLEEP=1 +epprd_rg:cl_deactivate_fs[864] typeset -li SLEEP +epprd_rg:cl_deactivate_fs[865] LIMIT=60 +epprd_rg:cl_deactivate_fs[865] typeset -li LIMIT +epprd_rg:cl_deactivate_fs[866] export SLEEP +epprd_rg:cl_deactivate_fs[867] export LIMIT +epprd_rg:cl_deactivate_fs[868] TMP_FILENAME=_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[870] (( 0 != 0 )) +epprd_rg:cl_deactivate_fs[875] OEM_CALL=false +epprd_rg:cl_deactivate_fs[879] : Check here to see if the forced unmount option can be used +epprd_rg:cl_deactivate_fs[881] FORCE_OK='' +epprd_rg:cl_deactivate_fs[881] export FORCE_OK +epprd_rg:cl_deactivate_fs[882] O_FlAG='' +epprd_rg:cl_deactivate_fs[882] export O_FlAG +epprd_rg:cl_deactivate_fs[885] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_deactivate_fs[886] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_deactivate_fs[887] : 99.99.999.999 +epprd_rg:cl_deactivate_fs[889] typeset -li V R M F +epprd_rg:cl_deactivate_fs[890] typeset -Z2 R +epprd_rg:cl_deactivate_fs[891] typeset -Z3 M +epprd_rg:cl_deactivate_fs[892] typeset -Z3 F +epprd_rg:cl_deactivate_fs[893] jfs2_lvl=601002000 +epprd_rg:cl_deactivate_fs[893] typeset -li jfs2_lvl +epprd_rg:cl_deactivate_fs[894] fuser_lvl=601004000 +epprd_rg:cl_deactivate_fs[894] typeset -li fuser_lvl +epprd_rg:cl_deactivate_fs[895] VRMF=0 +epprd_rg:cl_deactivate_fs[895] typeset -li VRMF +epprd_rg:cl_deactivate_fs[898] : Here try and figure out what level of JFS2 is installed +epprd_rg:cl_deactivate_fs[900] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_deactivate_fs[900] cut -f3 -d: +epprd_rg:cl_deactivate_fs[900] read V R M F +epprd_rg:cl_deactivate_fs[900] IFS=. 
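The typeset gymnastics at the top of cl_deactivate_fs implement a fixed-width fileset-level comparison: release, modification and fix are zero-filled so that the concatenated V.R.M.F collapses into one integer that orders correctly (7.2.5.102 becomes 702005102). A sketch mirroring the traced logic:

    # Pad R to 2 and M/F to 3 digits so string concatenation yields a
    # comparable integer: 7.2.5.102 -> 702005102 >= 601002000.
    typeset -li V R M F VRMF
    typeset -Z2 R
    typeset -Z3 M F
    lslpp -lcqOr bos.rte.filesystem | cut -f3 -d: | IFS=. read V R M F
    VRMF=$V$R$M$F
    (( VRMF >= 601002000 )) && FORCE_OK=true   # JFS2 allows umount -f
    (( VRMF >= 601004000 )) && O_FLAG=-O       # fuser accepts -O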
+epprd_rg:cl_deactivate_fs[901] VRMF=702005102 +epprd_rg:cl_deactivate_fs[903] (( 702005102 >= 601002000 )) +epprd_rg:cl_deactivate_fs[906] : JFS2 at this level that supports forced unmount +epprd_rg:cl_deactivate_fs[908] FORCE_OK=true +epprd_rg:cl_deactivate_fs[911] (( 702005102 >= 601004000 )) +epprd_rg:cl_deactivate_fs[914] : fuser at this level supports the -O flag +epprd_rg:cl_deactivate_fs[916] O_FLAG=-O +epprd_rg:cl_deactivate_fs[920] : if JOB_TYPE is set and is not GROUP, then process_resources is parent +epprd_rg:cl_deactivate_fs[922] [[ FILESYSTEMS != 0 ]] +epprd_rg:cl_deactivate_fs[922] [[ FILESYSTEMS != GROUP ]] +epprd_rg:cl_deactivate_fs[923] deactivate_fs_process_resources +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:705] STATUS=0 +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:705] typeset -li STATUS +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:708] : for the temp file, just take the first rg name +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:710] print epprd_rg +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:710] cut -f 1 -d ' ' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:710] read RES_GRP +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:711] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:714] : Remove the status file if already exists +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:716] rm -f /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:719] : go through all resource groups +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:721] pid_list='' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:724] export GROUPNAME +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:725] export RECOVERY_METHOD +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:728] : Get a reverse sorted list of the filesystems in this RG so that they +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:729] : release in opposite order of mounting. This is needed for nested mounts. 
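The reverse unique sort that follows is what the comment promises: with a common prefix, a child mount point sorts after its parent, so reversing the order releases children first:

    # Reverse-sorted, /oracle/EPP/sapdata4 ... precede /oracle/EPP,
    # which precedes /oracle -- the order nested unmounts require.
    print $FILE_SYSTEMS | tr ',' '\n' | sort -ru

find_nested_mounts then scans the live mount table (the large mount_out capture below) for additional filesystems mounted beneath the listed ones.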
+epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:731] print /usr/sap,/sapmnt,/oracle/EPP/sapdata4,/oracle/EPP/sapdata3,/oracle/EPP/sapdata2,/oracle/EPP/sapdata1,/oracle/EPP/origlogB,/oracle/EPP/origlogA,/oracle/EPP/oraarch,/oracle/EPP/mirrlogB,/oracle/EPP/mirrlogA,/oracle/EPP,/oracle,/board_org +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:731] read LIST_OF_FILE_SYSTEMS_FOR_RG FILE_SYSTEMS +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:731] IFS=: +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:732] print /usr/sap,/sapmnt,/oracle/EPP/sapdata4,/oracle/EPP/sapdata3,/oracle/EPP/sapdata2,/oracle/EPP/sapdata1,/oracle/EPP/origlogB,/oracle/EPP/origlogA,/oracle/EPP/oraarch,/oracle/EPP/mirrlogB,/oracle/EPP/mirrlogA,/oracle/EPP,/oracle,/board_org +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:732] tr , '\n' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:732] sort -ru +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:732] LIST_OF_FILE_SYSTEMS_FOR_RG=$'/usr/sap\n/sapmnt\n/oracle/EPP/sapdata4\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata1\n/oracle/EPP/origlogB\n/oracle/EPP/origlogA\n/oracle/EPP/oraarch\n/oracle/EPP/mirrlogB\n/oracle/EPP/mirrlogA\n/oracle/EPP\n/oracle\n/board_org' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:733] find_nested_mounts $'/usr/sap\n/sapmnt\n/oracle/EPP/sapdata4\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata1\n/oracle/EPP/origlogB\n/oracle/EPP/origlogA\n/oracle/EPP/oraarch\n/oracle/EPP/mirrlogB\n/oracle/EPP/mirrlogA\n/oracle/EPP\n/oracle\n/board_org' +epprd_rg:cl_deactivate_fs[find_nested_mounts:88] given_fs_list=$'/usr/sap\n/sapmnt\n/oracle/EPP/sapdata4\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata1\n/oracle/EPP/origlogB\n/oracle/EPP/origlogA\n/oracle/EPP/oraarch\n/oracle/EPP/mirrlogB\n/oracle/EPP/mirrlogA\n/oracle/EPP\n/oracle\n/board_org' +epprd_rg:cl_deactivate_fs[find_nested_mounts:88] typeset given_fs_list +epprd_rg:cl_deactivate_fs[find_nested_mounts:90] typeset first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:91] mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:91] mount_out=$' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 
rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:91] typeset mount_out +epprd_rg:cl_deactivate_fs[find_nested_mounts:92] discovered_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:92] typeset discovered_fs +epprd_rg:cl_deactivate_fs[find_nested_mounts:93] typeset line fs nested_fs +epprd_rg:cl_deactivate_fs[find_nested_mounts:94] typeset mounted_fs_list +epprd_rg:cl_deactivate_fs[find_nested_mounts:96] fs_count=0 +epprd_rg:cl_deactivate_fs[find_nested_mounts:96] typeset -li fs_count +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /usr/sap +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/saplv /usr/sap jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/saplv /usr/sap jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 
/ jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /sapmnt +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/sapmntlv /sapmnt jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/sapmntlv /sapmnt jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n 
/dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) 
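Note that the list driving these grep passes was produced above by tr ',' '\n' | sort -ru: reverse lexicographic order guarantees a nested mount point such as /oracle/EPP/sapdata1 is listed before its parent /oracle/EPP, because a parent path is always a proper prefix of its children. A standalone sketch of that ordering trick (the list value is hypothetical):

    # A parent path sorts before its children ascending, so a reverse sort
    # (-r) with duplicates removed (-u) yields a children-first unmount order.
    fs_list='/usr/sap,/oracle/EPP/sapdata1,/oracle/EPP,/oracle'
    print "$fs_list" | tr ',' '\n' | sort -ru | while read fs ; do
        print "umount order: $fs"    # each child prints before its parent
    done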
+epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv 
/oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 
rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraclelv 
/oracle jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' 
/dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 
rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle/EPP +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=$' /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- $' /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 10' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 10 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] echo $' /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv 
/oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:128] [[ jfs2 == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n '' ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/mirrlogA == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/mirrlogAlv /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/mirrlogA ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/mirrlogB == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
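The counting idiom running through these passes is worth distilling: mount output is captured once, then grep -w is applied per mount point. Because '/' is not a word character, grep -w /oracle/EPP also matches the lines for /oracle/EPP/sapdata1 and the other children, which is why fs_count came to 10 for /oracle/EPP while each leaf filesystem counted exactly 1; only a count above 1 triggers the line-by-line nested scan seen here. A sketch of that check, with variable names as in the trace:

    # Detect whether anything is mounted below $fs. grep -w still matches
    # /oracle/EPP inside /oracle/EPP/sapdata1 because the trailing slash
    # is a non-word character.
    fs=/oracle/EPP                               # hypothetical mount point
    mount_out=$(mount)                           # captured once, reused per fs
    mounted_fs_list=$(print -- "$mount_out" | grep -w "$fs")
    fs_count=$(print -- "$mounted_fs_list" | wc -l)
    (( fs_count > 1 )) && print "$fs has nested mounts below it"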
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/mirrlogBlv /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/mirrlogB ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/oraarch == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/oraarchlv /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/oraarch ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/origlogA == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/origlogAlv /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/origlogA ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/origlogB == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/origlogBlv /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/origlogB ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata1 == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata1lv /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata1 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata2 == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata2lv /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata2 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata3 == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
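The per-line parse interleaved here handles the two shapes an AIX mount line can take: a local line starts with the device, so field two is the mount point and field three the vfs type (jfs*), while an NFS line starts with the remote node, pushing the local mount point into field three. A condensed sketch of that dispatch (the sample line is hypothetical; the elif mirrors the :128 test that the jfs2 lines skip past):

    fs=/oracle/EPP
    print '/dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw' |
        read first second third fourth rest
    nested_fs=''
    if [[ $second == $fs/* && $third == jfs* ]] ; then
        nested_fs=$second            # local mount: field 2 is the mount point
    elif [[ $third == $fs/* ]] ; then
        nested_fs=$third             # NFS mount: field 3 is the local mount point
    fi
    [[ -n $nested_fs ]] && print "nested: $nested_fs"

Running the last pipeline component in the current shell is what lets read populate first..rest for the tests that follow, the same ksh behavior the trace relies on.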
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata3lv /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata3 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata4 == /oracle/EPP/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata4lv /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata4 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 
rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /oracle +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=$' /dev/oraclelv /oracle jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- $' /dev/oraclelv /oracle jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 11' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 11 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] echo $' /dev/oraclelv /oracle jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] 
read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/oraclelv /oracle jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:128] [[ jfs2 == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n '' ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/epplv /oracle/EPP +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/mirrlogA == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/mirrlogAlv /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/mirrlogA ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/mirrlogB == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/mirrlogBlv /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/mirrlogB ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/oraarch == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
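From the /oracle pass onward, discovered_fs re-records the entire /oracle/EPP subtree, so entries such as /oracle/EPP/mirrlogA now appear twice in the accumulator. That is harmless provided the combined list is de-duplicated before the unmounts run; a plausible merge step, assumed here by analogy with the earlier sort -ru and not shown in this trace, would be:

    # Merge configured and discovered mount points, then sort -ru so each
    # appears once, children first. (Assumed post-processing step.)
    all_fs=$(print -- $given_fs_list $discovered_fs | tr ' ' '\n' | sort -ru)
    # Unquoted expansion collapses embedded newlines to single spaces
    # before tr splits the merged list back to one entry per line.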
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/oraarchlv /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/oraarch ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/origlogA == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/origlogAlv /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/origlogA ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/origlogB == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/origlogBlv /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/origlogB ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata1 == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata1lv /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata1 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata2 == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata2lv /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata2 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata3 == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... +epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata3lv /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata3 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:107] : The lines can be of one of two forms, depending on +epprd_rg:cl_deactivate_fs[find_nested_mounts:108] : whether this is a local mount or an NFS mount +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] print '/dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:118] read first second third fourth rest +epprd_rg:cl_deactivate_fs[find_nested_mounts:119] nested_fs='' +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ /oracle/EPP/sapdata4 == /oracle/* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:120] [[ jfs2 == jfs* ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:123] : The mount output is of the form +epprd_rg:cl_deactivate_fs[find_nested_mounts:124] : lv_name lower_mount_point ... 
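
Every nested filesystem ends up recorded twice in discovered_fs (once per discovery pass), which is harmless: the pass closes below with "tr ' ' '\n' | sort -ru", where -u drops the duplicates and the reverse lexical order puts deeper mount points first, so each filesystem is unmounted before the filesystem it is mounted over. A sketch of the ordering effect:

    # Sketch: reverse lexical sort yields a child-before-parent unmount order;
    # -u removes the duplicates left by overlapping discovery passes.
    print -- '/oracle /oracle/EPP /oracle/EPP/sapdata1 /usr/sap /oracle/EPP' |
        tr ' ' '\n' | sort -ru
    # /usr/sap
    # /oracle/EPP/sapdata1
    # /oracle/EPP
    # /oracle
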
+epprd_rg:cl_deactivate_fs[find_nested_mounts:125] : /dev/sapdata4lv /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:127] nested_fs=/oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:138] [[ -n /oracle/EPP/sapdata4 ]] +epprd_rg:cl_deactivate_fs[find_nested_mounts:141] : Record new nested file system /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs[find_nested_mounts:143] discovered_fs=' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[find_nested_mounts:104] read line +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] print -- $' node mounted mounted over vfs date options \n-------- --------------- --------------- ------ ------------ --------------- \n /dev/hd4 / jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd2 /usr jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd9var /var jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd3 /tmp jfs2 Nov 16 15:10 rw,log=/dev/hd8 \n /dev/hd1 /home jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/hd11admin /admin jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /proc /proc procfs Nov 16 15:11 rw \n /dev/hd10opt /opt jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/livedump /var/adm/ras/livedump jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /dev/ptflv /ptf jfs2 Nov 16 15:11 rw,log=/dev/hd8 \n /ahafs /aha ahafs Nov 16 15:11 rw \nepdev /sapcd /sapcd nfs3 Jan 28 17:37 bg,soft,intr,sec=sys,rw\n /dev/boardlv /board_org jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraclelv /oracle jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/epplv /oracle/EPP jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogAlv /oracle/EPP/mirrlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/mirrlogBlv /oracle/EPP/mirrlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/oraarchlv /oracle/EPP/oraarch jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogAlv /oracle/EPP/origlogA jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/origlogBlv /oracle/EPP/origlogB jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata1lv /oracle/EPP/sapdata1 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata2lv /oracle/EPP/sapdata2 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata3lv /oracle/EPP/sapdata3 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapdata4lv /oracle/EPP/sapdata4 jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/sapmntlv /sapmnt jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv\n /dev/saplv /usr/sap jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] grep -w /board_org +epprd_rg:cl_deactivate_fs[find_nested_mounts:100] mounted_fs_list=' /dev/boardlv /board_org jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] print -- ' /dev/boardlv /board_org jfs2 Jan 28 19:50 rw,log=/dev/epprdaloglv' +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] wc -l +epprd_rg:cl_deactivate_fs[find_nested_mounts:101] fs_count=' 1' +epprd_rg:cl_deactivate_fs[find_nested_mounts:102] (( 1 > 1 )) +epprd_rg:cl_deactivate_fs[find_nested_mounts:150] : Pass comprehensive list to stdout, sorted to get correct unmount order +epprd_rg:cl_deactivate_fs[find_nested_mounts:152] print -- 
$'/usr/sap\n/sapmnt\n/oracle/EPP/sapdata4\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata1\n/oracle/EPP/origlogB\n/oracle/EPP/origlogA\n/oracle/EPP/oraarch\n/oracle/EPP/mirrlogB\n/oracle/EPP/mirrlogA\n/oracle/EPP\n/oracle\n/board_org' ' /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[find_nested_mounts:152] tr ' ' '\n' +epprd_rg:cl_deactivate_fs[find_nested_mounts:152] sort -ru +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:733] LIST_OF_FILE_SYSTEMS_FOR_RG=$'/usr/sap\n/sapmnt\n/oracle/EPP/sapdata4\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata1\n/oracle/EPP/origlogB\n/oracle/EPP/origlogA\n/oracle/EPP/oraarch\n/oracle/EPP/mirrlogB\n/oracle/EPP/mirrlogA\n/oracle/EPP\n/oracle\n/board_org' +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:736] : Get the recovery method used for all filesystems in this resource group +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:738] print sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:738] read RECOVERY_METHOD RECOVERY_METHODS +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:738] IFS=: +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:739] print sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:739] cut -f 1 -d , +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:739] RECOVERY_METHOD=sequential +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:742] : verify the recovery method +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:744] RECOVERY_METHOD=sequential +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:745] RECOVERY_METHOD=sequential +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:747] [[ sequential != sequential ]] +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:754] : Tell the cluster manager what we are going to do +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:756] ALLFS=All_filesystems +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:757] cl_RMupdate resource_releasing All_filesystems cl_deactivate_fs 2023-09-28T15:56:02.985051 2023-09-28T15:56:02.989553 +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:760] : now that all variables are set, perform the umounts +epprd_rg:cl_deactivate_fs:/usr/sap[deactivate_fs_process_resources:764] PS4_LOOP=/usr/sap +epprd_rg:cl_deactivate_fs:/usr/sap[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/usr/sap[deactivate_fs_process_resources:770] fs_umount /usr/sap cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.294)[fs_umount:313] FS=/usr/sap +epprd_rg:cl_deactivate_fs(0.294)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(0.294)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(0.294)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(0.294)[fs_umount:315] 
TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(0.294)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(0.294)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(0.316)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(0.316)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(0.316)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(0.316)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(0.316)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(0.316)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(0.316)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(0.316)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(0.316)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(0.316)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(0.316)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(0.316)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(0.316)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(0.316)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(0.316)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(0.316)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(0.316)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(0.317)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(0.319)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/usr/sap +epprd_rg:cl_deactivate_fs(0.324)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(0.324)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(0.324)[fs_umount:365] : 
Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(0.324)[fs_umount:367] lsfs -c /usr/sap +epprd_rg:cl_deactivate_fs(0.327)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/usr/sap:/dev/saplv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(0.327)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(0.328)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/usr/sap:/dev/saplv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(0.330)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(0.332)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(0.332)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(0.332)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(0.332)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(0.332)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(0.333)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(0.334)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(0.335)[fs_umount:394] awk '{ if ( $1 == "/dev/saplv" ) print $2 }' +epprd_rg:cl_deactivate_fs(0.340)[fs_umount:394] FS_MOUNTED=/usr/sap +epprd_rg:cl_deactivate_fs(0.340)[fs_umount:395] [[ -n /usr/sap ]] +epprd_rg:cl_deactivate_fs(0.340)[fs_umount:397] [[ /usr/sap != /usr/sap ]] +epprd_rg:cl_deactivate_fs(0.340)[fs_umount:409] [[ /usr/sap == / ]] +epprd_rg:cl_deactivate_fs(0.340)[fs_umount:409] [[ /usr/sap == /usr ]] +epprd_rg:cl_deactivate_fs(0.340)[fs_umount:409] [[ /usr/sap == /dev ]] +epprd_rg:cl_deactivate_fs(0.340)[fs_umount:409] [[ /usr/sap == /proc ]] +epprd_rg:cl_deactivate_fs(0.340)[fs_umount:409] [[ /usr/sap == /var ]] +epprd_rg:cl_deactivate_fs(0.340)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/usr/sap' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:03.067835 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:03.067835|INFO: Deactivating Filesystem|/usr/sap' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(0.372)[fs_umount:427] : Try up to 60 times to unmount /usr/sap +epprd_rg:cl_deactivate_fs(0.372)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(0.372)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(0.372)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(0.374)[fs_umount:432] : Attempt 1 of 60 to unmount at Sep 28 15:56:03.000 +epprd_rg:cl_deactivate_fs(0.374)[fs_umount:434] umount /usr/sap umount: error unmounting /dev/saplv: Device busy +epprd_rg:cl_deactivate_fs(1.446)[fs_umount:442] : At this point, unmount of /usr/sap has not worked. Attempt a SIGKILL to +epprd_rg:cl_deactivate_fs(1.446)[fs_umount:443] : all processes having open file descriptors on this LV and FS. 
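
The first umount attempt fails with "Device busy" because SAP processes still hold files open under /usr/sap. Before killing them, the script enumerates the blockers with fuser against the logical volume: as the trace shows, the bare PIDs arrive on stdout (stderr, discarded here, carries the per-PID usage codes and user names), and each PID is then recorded with "ps ewwww", whose e flag appends the full process environment so the kill can be audited afterwards. A sketch of this enumeration step:

    # Sketch: list PIDs with open files on the LV, then log each one in full.
    lv=/dev/saplv
    pidlist=$(fuser -O -u -x $lv 2>/dev/null)   # stdout carries bare PIDs only
    for pid in $pidlist ; do
        ps ewwww $pid       # command line plus environment, recorded before the kill
    done
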
+epprd_rg:cl_deactivate_fs(1.446)[fs_umount:445] date '+%h %d %H:%M:%S.000' Sep 28 15:56:04.000 +epprd_rg:cl_deactivate_fs(1.449)[fs_umount:453] clodmget -n -q name=MOUNT_FILESYSTEM -f group HACMPresource +epprd_rg:cl_deactivate_fs(1.453)[fs_umount:453] crossmount_rg=epprd_rg +epprd_rg:cl_deactivate_fs(1.453)[fs_umount:457] clodmget -n -f value -q group='epprd_rg and name=FS_BEFORE_IPADDR' HACMPresource +epprd_rg:cl_deactivate_fs(1.456)[fs_umount:457] [[ false == true ]] +epprd_rg:cl_deactivate_fs(1.457)[fs_umount:468] mount +epprd_rg:cl_deactivate_fs(1.457)[fs_umount:468] LC_ALL=C +epprd_rg:cl_deactivate_fs(1.458)[fs_umount:468] grep -iq nfs +epprd_rg:cl_deactivate_fs(1.458)[fs_umount:468] awk '$4~ /nfs/ { print $4} ' +epprd_rg:cl_deactivate_fs(1.461)[fs_umount:469] (( 0 == 0 )) +epprd_rg:cl_deactivate_fs(1.462)[fs_umount:470] disable_procfile_debug=true +epprd_rg:cl_deactivate_fs(1.462)[fs_umount:475] : Record the open files on /dev/saplv and /usr/sap, and the processes that we are +epprd_rg:cl_deactivate_fs(1.462)[fs_umount:476] : about to kill. +epprd_rg:cl_deactivate_fs(1.462)[fs_umount:478] fuser -O -u -x /dev/saplv +epprd_rg:cl_deactivate_fs(1.463)[fs_umount:478] 2> /dev/null +epprd_rg:cl_deactivate_fs(1.484)[fs_umount:478] pidlist=' 21823824 24248652 24772992 24969578 26083754' +epprd_rg:cl_deactivate_fs(1.484)[fs_umount:482] : Process 21823824 has open files on /usr/sap. Record information about this +epprd_rg:cl_deactivate_fs(1.484)[fs_umount:483] : process in case anyone is later suprised by this action. +epprd_rg:cl_deactivate_fs(1.484)[fs_umount:485] ps ewwww 21823824 PID TTY STAT TIME COMMAND 21823824 - A 7:37 /usr/sap/EPP/SCS01/exe/sapstartsrv pf=/usr/sap/EPP/SYS/profile/EPP_SCS01_epprd -D _=/usr/sap/EPP/SCS01/exe/sapstartsrv LANG=en_US THREAD=NOPS LOGIN=eppadm CLCMD_PASSTHRU=1 PATH=/oracle/EPP/112_64/bin:/usr/bin:/etc:/usr/sbin:/usr/ucb:/home/eppadm/bin:/usr/bin/X11:/sbin:.:/usr/sap/EPP/SYS/exe/uc/rs6000_64:/usr/sap/EPP/SYS/exe/run:/home/eppadm NLS_LANG=AMERICAN_AMERICA.UTF8 EXTENDED_HISTORY=ON RSEC_SSFS_KEYPATH=/usr/sap/EPP/SYS/global/security/rsecssfs/key ORACLE_BASE=/oracle LC__FASTMSG=true LOGNAME=eppadm MAIL=/var/spool/mail/eppadm dbs_ora_tnsname=EPP ORACLE_SID=EPP LOCPATH=/usr/lib/nls/loc DB_SID=EPP DIR_LIBRARY=/usr/sap/EPP/SCS01/exe USER=eppadm AUTHSTATE=files TNS_ADMIN=/usr/sap/EPP/SYS/profile/oracle IBM_JAVA_OPTIONS=-Xmx256M -Xj9 SAPEXE=/usr/sap/EPP/SYS/exe/run SHELL=/bin/csh ODMDIR=/etc/objrepos CLUSTER_OVERRIDE=yes HISTSIZE=10000 SAPDATA_HOME=/oracle/EPP RSEC_SSFS_DATAPATH=/usr/sap/EPP/SYS/global/security/rsecssfs/data HOME=/home/eppadm TERM=dumb MAILMSG=[YOU HAVE NEW MAIL] ORACLE_HOME=/oracle/EPP/112_64 rsdb_ssfs_connect=0 PWD=/usr/sap/EPP/SYS/profile TZ=KORST-9 dbms_type=ORA SAPSYSTEMNAME=EPP NLSPATH=/usr/lib/nls/msg/%L/%N:/usr/lib/nls/msg/%L/%N.cat:/usr/lib/nls/msg/%l.%c/%N:/usr/lib/nls/msg/%l.%c/%N.cat LIBPATH=/usr/sap/EPP/SCS01/exe:/usr/sap/EPP/SCS01/exe:/usr/sap/EPP/SCS01/exe:/usr/sap/EPP/SYS/exe/run:/usr/sap/EPP/SYS/exe/uc/rs6000_64:/oracle/client/11x_64/instantclient +epprd_rg:cl_deactivate_fs(1.491)[fs_umount:486] [[ true != true ]] +epprd_rg:cl_deactivate_fs(1.491)[fs_umount:482] : Process 24248652 has open files on /usr/sap. Record information about this +epprd_rg:cl_deactivate_fs(1.491)[fs_umount:483] : process in case anyone is later suprised by this action. 
+epprd_rg:cl_deactivate_fs(1.491)[fs_umount:485] ps ewwww 24248652 PID TTY STAT TIME COMMAND 24248652 - A 0:58 /usr/sap/hostctrl/exe/saphostexec pf=/usr/sap/hostctrl/exe/host_profile _=/usr/sap/hostctrl/exe/hostexecstart LANG=en_US THREAD=NOPS LOGIN=eppadm CLCMD_PASSTHRU=1 PATH=/usr/bin:/bin:/usr/sbin:/sbin:/oracle/EPP/112_64/bin:/etc:/usr/ucb:/home/eppadm/bin:/usr/bin/X11:.:/usr/sap/EPP/SYS/exe/uc/rs6000_64:/usr/sap/EPP/SYS/exe/run:/home/eppadm: NLS_LANG=AMERICAN_AMERICA.UTF8 EXTENDED_HISTORY=ON RSEC_SSFS_KEYPATH=/usr/sap/EPP/SYS/global/security/rsecssfs/key ORACLE_BASE=/oracle LC__FASTMSG=true LOGNAME=eppadm MAIL=/var/spool/mail/eppadm dbs_ora_tnsname=EPP ORACLE_SID=EPP LOCPATH=/usr/lib/nls/loc DB_SID=EPP DIR_LIBRARY=/usr/sap/EPP/SYS/exe/run USER=eppadm AUTHSTATE=files TNS_ADMIN=/usr/sap/EPP/SYS/profile/oracle IBM_JAVA_OPTIONS=-Xmx256M -Xj9 SAPEXE=/usr/sap/EPP/SYS/exe/run SHELL=/bin/csh ODMDIR=/etc/objrepos CLUSTER_OVERRIDE=yes HISTSIZE=10000 SAPDATA_HOME=/oracle/EPP RSEC_SSFS_DATAPATH=/usr/sap/EPP/SYS/global/security/rsecssfs/data HOME=/home/eppadm TERM=dumb MAILMSG=[YOU HAVE NEW MAIL] ORACLE_HOME=/oracle/EPP/112_64 rsdb_ssfs_connect=0 PWD=/home/eppadm TZ=KORST-9 dbms_type=ORA SAPSYSTEMNAME=EPP NODNSSAPTRANSHOST=1 LIBPATH=/usr/sap/hostctrl/exe +epprd_rg:cl_deactivate_fs(1.498)[fs_umount:486] [[ true != true ]] +epprd_rg:cl_deactivate_fs(1.498)[fs_umount:482] : Process 24772992 has open files on /usr/sap. Record information about this +epprd_rg:cl_deactivate_fs(1.498)[fs_umount:483] : process in case anyone is later suprised by this action. +epprd_rg:cl_deactivate_fs(1.498)[fs_umount:485] ps ewwww 24772992 PID TTY STAT TIME COMMAND 24772992 - A 0:42 /usr/sap/DAA/SMDA97/exe/sapstartsrv pf=/usr/sap/DAA/SYS/profile/DAA_SMDA97_epprd -D _=/usr/sap/DAA/SMDA97/exe/sapstartsrv LANG=en_US LOGIN=daaadm CLCMD_PASSTHRU=1 PATH=/usr/bin:/etc:/usr/sbin:/usr/ucb:/usr/bin/X11:/sbin:/usr/java8_64/jre/bin:/usr/java8_64/bin:/usr/sap/DAA/SYS/exe/uc/rs6000_64:/usr/sap/DAA/SYS/exe/run:/home/daaadm:. EXTENDED_HISTORY=ON RSEC_SSFS_KEYPATH=/usr/sap/DAA/SYS/global/security/rsecssfs/key LC__FASTMSG=true LOGNAME=daaadm LOCPATH=/usr/lib/nls/loc DIR_LIBRARY=/usr/sap/DAA/SMDA97/exe USER=daaadm AUTHSTATE=files IBM_JAVA_OPTIONS=-Xmx256M -Xj9 SHELL=/bin/csh ODMDIR=/etc/objrepos CLUSTER_OVERRIDE=yes HISTSIZE=10000 RSEC_SSFS_DATAPATH=/usr/sap/DAA/SYS/global/security/rsecssfs/data HOME=/home/daaadm TERM=dumb rsdb_ssfs_connect=0 PWD=/usr/sap/DAA/SYS/profile TZ=KORST-9 SAPSYSTEMNAME=DAA NLSPATH=/usr/lib/nls/msg/%L/%N:/usr/lib/nls/msg/%L/%N.cat:/usr/lib/nls/msg/%l.%c/%N:/usr/lib/nls/msg/%l.%c/%N.cat LIBPATH=/usr/sap/DAA/SMDA97/exe:/usr/sap/DAA/SMDA97/exe:/usr/sap/DAA/SMDA97/exe:/usr/sap/DAA/SYS/exe/run:/usr/sap/DAA/SYS/exe/uc/rs6000_64 +epprd_rg:cl_deactivate_fs(1.505)[fs_umount:486] [[ true != true ]] +epprd_rg:cl_deactivate_fs(1.505)[fs_umount:482] : Process 24969578 has open files on /usr/sap. Record information about this +epprd_rg:cl_deactivate_fs(1.505)[fs_umount:483] : process in case anyone is later suprised by this action. 
+epprd_rg:cl_deactivate_fs(1.505)[fs_umount:485] ps ewwww 24969578 PID TTY STAT TIME COMMAND 24969578 - A 0:07 /usr/sap/hostctrl/exe/sapstartsrv pf=/usr/sap/hostctrl/exe/host_profile -D _=/usr/sap/hostctrl/exe/hostexecstart LANG=en_US THREAD=NOPS LOGIN=eppadm CLCMD_PASSTHRU=1 PATH=/usr/bin:/bin:/usr/sbin:/sbin:/oracle/EPP/112_64/bin:/etc:/usr/ucb:/home/eppadm/bin:/usr/bin/X11:.:/usr/sap/EPP/SYS/exe/uc/rs6000_64:/usr/sap/EPP/SYS/exe/run:/home/eppadm: NLS_LANG=AMERICAN_AMERICA.UTF8 EXTENDED_HISTORY=ON RSEC_SSFS_KEYPATH=/usr/sap/EPP/SYS/global/security/rsecssfs/key ORACLE_BASE=/oracle LC__FASTMSG=true LOGNAME=sapadm MAIL=/var/spool/mail/eppadm dbs_ora_tnsname=EPP ORACLE_SID=EPP LOCPATH=/usr/lib/nls/loc DB_SID=EPP DIR_LIBRARY=/usr/sap/EPP/SYS/exe/run USER=sapadm AUTHSTATE=files TNS_ADMIN=/usr/sap/EPP/SYS/profile/oracle IBM_JAVA_OPTIONS=-Xmx256M -Xj9 SAPEXE=/usr/sap/EPP/SYS/exe/run SHELL=/bin/csh ODMDIR=/etc/objrepos CLUSTER_OVERRIDE=yes HISTSIZE=10000 SAPDATA_HOME=/oracle/EPP RSEC_SSFS_DATAPATH=/usr/sap/EPP/SYS/global/security/rsecssfs/data HOME=/home/sapadm TERM=dumb MAILMSG=[YOU HAVE NEW MAIL] ORACLE_HOME=/oracle/EPP/112_64 rsdb_ssfs_connect=0 PWD=/home/eppadm TZ=KORST-9 dbms_type=ORA SAPSYSTEMNAME=EPP NODNSSAPTRANSHOST=1 USERNAME=sapadm LIBPATH=/usr/sap/hostctrl/exe +epprd_rg:cl_deactivate_fs(1.512)[fs_umount:486] [[ true != true ]] +epprd_rg:cl_deactivate_fs(1.512)[fs_umount:482] : Process 26083754 has open files on /usr/sap. Record information about this +epprd_rg:cl_deactivate_fs(1.512)[fs_umount:483] : process in case anyone is later suprised by this action. +epprd_rg:cl_deactivate_fs(1.512)[fs_umount:485] ps ewwww 26083754 PID TTY STAT TIME COMMAND 26083754 - A 19:54 /usr/sap/EPP/J00/exe/sapstartsrv pf=/usr/sap/EPP/SYS/profile/EPP_J00_epprd -D _=/usr/sap/EPP/J00/exe/sapstartsrv LANG=en_US THREAD=NOPS LOGIN=eppadm CLCMD_PASSTHRU=1 PATH=/oracle/EPP/112_64/bin:/usr/bin:/etc:/usr/sbin:/usr/ucb:/home/eppadm/bin:/usr/bin/X11:/sbin:.:/usr/sap/EPP/SYS/exe/uc/rs6000_64:/usr/sap/EPP/SYS/exe/run:/home/eppadm NLS_LANG=AMERICAN_AMERICA.UTF8 EXTENDED_HISTORY=ON RSEC_SSFS_KEYPATH=/usr/sap/EPP/SYS/global/security/rsecssfs/key ORACLE_BASE=/oracle LC__FASTMSG=true LOGNAME=eppadm MAIL=/var/spool/mail/eppadm dbs_ora_tnsname=EPP ORACLE_SID=EPP LOCPATH=/usr/lib/nls/loc DB_SID=EPP DIR_LIBRARY=/usr/sap/EPP/J00/exe USER=eppadm AUTHSTATE=files TNS_ADMIN=/usr/sap/EPP/SYS/profile/oracle IBM_JAVA_OPTIONS=-Xmx256M -Xj9 SAPEXE=/usr/sap/EPP/SYS/exe/run SHELL=/bin/csh ODMDIR=/etc/objrepos CLUSTER_OVERRIDE=yes HISTSIZE=10000 SAPDATA_HOME=/oracle/EPP RSEC_SSFS_DATAPATH=/usr/sap/EPP/SYS/global/security/rsecssfs/data HOME=/home/eppadm TERM=dumb MAILMSG=[YOU HAVE NEW MAIL] ORACLE_HOME=/oracle/EPP/112_64 rsdb_ssfs_connect=0 PWD=/usr/sap/EPP/SYS/profile TZ=KORST-9 dbms_type=ORA SAPSYSTEMNAME=EPP NLSPATH=/usr/lib/nls/msg/%L/%N:/usr/lib/nls/msg/%L/%N.cat:/usr/lib/nls/msg/%l.%c/%N:/usr/lib/nls/msg/%l.%c/%N.cat LIBPATH=/usr/sap/EPP/J00/exe:/usr/sap/EPP/J00/exe:/usr/sap/EPP/J00/exe:/usr/sap/EPP/SYS/exe/run:/usr/sap/EPP/SYS/exe/uc/rs6000_64:/oracle/client/11x_64/instantclient +epprd_rg:cl_deactivate_fs(1.518)[fs_umount:486] [[ true != true ]] +epprd_rg:cl_deactivate_fs(1.518)[fs_umount:517] fuser -O -k -u -x /dev/saplv /dev/saplv: 21823824c(eppadm) 24248652c(root) 24772992c(daaadm) 24969578c(sapadm) 26083754c(eppadm) +epprd_rg:cl_deactivate_fs(1.540)[fs_umount:518] fuser -O -k -u -x -c /usr/sap /usr/sap: +epprd_rg:cl_deactivate_fs(1.607)[fs_umount:519] date '+%h %d %H:%M:%S.000' Sep 28 15:56:04.000 
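
With the blockers recorded, SIGKILL is delivered twice through fuser -k: once against the logical volume device and once, with -c, against the mount point, so that processes pinning the filesystem through their working directory are caught as well. The script then waits one second and retries the umount; the surrounding loop allows up to 60 attempts. A condensed ksh sketch of the retry strategy, using the same commands as the trace:

    # Sketch: up to 60 umount attempts, escalating to SIGKILL on failure.
    fs=/usr/sap lv=/dev/saplv
    for (( count=1 ; count <= 60 ; count++ )) ; do
        umount $fs && break                  # success: stop retrying
        pidlist=$(fuser -O -u -x $lv 2>/dev/null)
        fuser -O -k -u -x $lv                # kill holders of the device
        fuser -O -k -u -x -c $fs             # and users of the mount point itself
        [[ -n $pidlist ]] && sleep 1         # give the kills time to take effect
    done

Here the escalation works: the umount issued after the one-second wait returns cleanly roughly eight seconds later, once the killed sapstartsrv processes have exited.
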
+epprd_rg:cl_deactivate_fs(1.611)[fs_umount:522] : Wait 1 seconds for the kills to be effective +epprd_rg:cl_deactivate_fs(1.611)[fs_umount:524] [[ -n ' 21823824 24248652 24772992 24969578 26083754' ]] +epprd_rg:cl_deactivate_fs(1.611)[fs_umount:526] sleep 1 +epprd_rg:cl_deactivate_fs(2.611)[fs_umount:528] umount /usr/sap +epprd_rg:cl_deactivate_fs(10.382)[fs_umount:531] : Unmount of /usr/sap worked. Can stop now. +epprd_rg:cl_deactivate_fs(10.382)[fs_umount:533] break +epprd_rg:cl_deactivate_fs(10.382)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(10.382)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/usr/sap' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:13.108074 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:13.108074|INFO: Deactivating Filesystem|/usr/sap' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(10.412)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(10.412)[fs_umount:687] print -- 0 /dev/saplv /usr/sap +epprd_rg:cl_deactivate_fs(10.412)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(10.412)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/sapmnt[deactivate_fs_process_resources:764] PS4_LOOP=/sapmnt +epprd_rg:cl_deactivate_fs:/sapmnt[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/sapmnt[deactivate_fs_process_resources:770] fs_umount /sapmnt cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(10.412)[fs_umount:313] FS=/sapmnt +epprd_rg:cl_deactivate_fs(10.412)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(10.412)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(10.412)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(10.412)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(10.412)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(10.412)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(10.433)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(10.433)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(10.433)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(10.433)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(10.433)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(10.433)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(10.433)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(10.434)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(10.434)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(10.434)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(10.434)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(10.434)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(10.434)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(10.434)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(10.434)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(10.434)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(10.434)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(10.435)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/sapmnt +epprd_rg:cl_deactivate_fs(10.435)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(10.440)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(10.440)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(10.440)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(10.440)[fs_umount:367] lsfs -c /sapmnt +epprd_rg:cl_deactivate_fs(10.443)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/sapmnt:/dev/sapmntlv:jfs2:::20971520:rw:no:no' 
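
fs_umount resolves each mount point to its backing logical volume and vfs type through lsfs -c, whose colon-delimited output starts with a header line: tail -1 keeps only the data line, and an IFS=: read splits the fields. The construct works because ksh93 runs the last stage of a pipeline in the current shell, so the variables set by read survive. The subsequent mounted-check maps the device back to its live mount point with awk over mount. A sketch of both lookups, assuming the colon layout shown in the trace:

    # Sketch: resolve the LV and vfs type behind a mount point.
    fs=/sapmnt
    lsfs -c $fs | tail -1 | IFS=: read skip lv fs_type rest
    print -- "$fs is backed by $lv ($fs_type)"    # /sapmnt is backed by /dev/sapmntlv (jfs2)

    # Sketch: confirm the device is currently mounted, and where.
    mounted=$(LC_ALL=C mount | awk -v dev="$lv" '$1 == dev { print $2 }')
    [[ -n $mounted ]] && print -- "$lv mounted over $mounted"
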
+epprd_rg:cl_deactivate_fs(10.443)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(10.444)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/sapmnt:/dev/sapmntlv:jfs2:::20971520:rw:no:no' +epprd_rg:cl_deactivate_fs(10.445)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(10.446)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(10.446)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(10.447)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(10.447)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(10.447)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(10.449)[fs_umount:394] awk '{ if ( $1 == "/dev/sapmntlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(10.449)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(10.449)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(10.453)[fs_umount:394] FS_MOUNTED=/sapmnt +epprd_rg:cl_deactivate_fs(10.453)[fs_umount:395] [[ -n /sapmnt ]] +epprd_rg:cl_deactivate_fs(10.453)[fs_umount:397] [[ /sapmnt != /sapmnt ]] +epprd_rg:cl_deactivate_fs(10.453)[fs_umount:409] [[ /sapmnt == / ]] +epprd_rg:cl_deactivate_fs(10.453)[fs_umount:409] [[ /sapmnt == /usr ]] +epprd_rg:cl_deactivate_fs(10.453)[fs_umount:409] [[ /sapmnt == /dev ]] +epprd_rg:cl_deactivate_fs(10.453)[fs_umount:409] [[ /sapmnt == /proc ]] +epprd_rg:cl_deactivate_fs(10.453)[fs_umount:409] [[ /sapmnt == /var ]] +epprd_rg:cl_deactivate_fs(10.453)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/sapmnt' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:13.178401 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:13.178401|INFO: Deactivating Filesystem|/sapmnt' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(10.482)[fs_umount:427] : Try up to 60 times to unmount /sapmnt +epprd_rg:cl_deactivate_fs(10.482)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(10.482)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(10.482)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(10.485)[fs_umount:432] : Attempt 1 of 60 to unmount at Sep 28 15:56:13.000 +epprd_rg:cl_deactivate_fs(10.485)[fs_umount:434] umount /sapmnt +epprd_rg:cl_deactivate_fs(11.122)[fs_umount:437] : Unmount of /sapmnt worked. Can stop now. 
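
Each unmount is bracketed by a pair of amlog_trace calls, one before the attempt and one after success, so the availability log captures both timestamps. The record itself is a pipe-delimited line appended to /var/hacmp/availability/clavailability.log; clcycle rotates the log first, and cltime supplies the microsecond timestamp. A sketch of the record format, assuming cltime prints the timestamp assigned to DATE in the trace:

    # Sketch: the clavailability.log record written around each unmount.
    DATE=$(cltime)
    echo "|$DATE|INFO: Deactivating Filesystem|/sapmnt" >> /var/hacmp/availability/clavailability.log
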
+epprd_rg:cl_deactivate_fs(11.122)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(11.122)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(11.122)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/sapmnt' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:13.847867 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:13.847867|INFO: Deactivating Filesystem|/sapmnt' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(11.152)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(11.152)[fs_umount:687] print -- 0 /dev/sapmntlv /sapmnt +epprd_rg:cl_deactivate_fs(11.152)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(11.152)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata4[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata4[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata4[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/sapdata4 cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(11.152)[fs_umount:313] FS=/oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(11.152)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(11.152)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(11.152)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(11.152)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(11.152)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(11.152)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(11.172)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(11.172)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(11.172)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(11.172)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(11.172)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(11.172)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(11.172)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(11.172)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(11.172)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(11.172)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(11.172)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(11.172)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(11.172)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(11.172)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(11.172)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(11.172)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(11.172)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(11.174)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(11.176)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(11.181)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(11.181)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(11.181)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(11.181)[fs_umount:367] lsfs -c /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(11.184)[fs_umount:367] 
lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata4:/dev/sapdata4lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(11.184)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(11.185)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata4:/dev/sapdata4lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(11.188)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(11.189)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(11.190)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(11.190)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(11.190)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(11.190)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(11.191)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(11.191)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(11.192)[fs_umount:394] awk '{ if ( $1 == "/dev/sapdata4lv" ) print $2 }' +epprd_rg:cl_deactivate_fs(11.197)[fs_umount:394] FS_MOUNTED=/oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(11.197)[fs_umount:395] [[ -n /oracle/EPP/sapdata4 ]] +epprd_rg:cl_deactivate_fs(11.197)[fs_umount:397] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:cl_deactivate_fs(11.197)[fs_umount:409] [[ /oracle/EPP/sapdata4 == / ]] +epprd_rg:cl_deactivate_fs(11.197)[fs_umount:409] [[ /oracle/EPP/sapdata4 == /usr ]] +epprd_rg:cl_deactivate_fs(11.197)[fs_umount:409] [[ /oracle/EPP/sapdata4 == /dev ]] +epprd_rg:cl_deactivate_fs(11.197)[fs_umount:409] [[ /oracle/EPP/sapdata4 == /proc ]] +epprd_rg:cl_deactivate_fs(11.197)[fs_umount:409] [[ /oracle/EPP/sapdata4 == /var ]] +epprd_rg:cl_deactivate_fs(11.197)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:13.921733 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:13.921733|INFO: Deactivating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(11.225)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(11.225)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(11.225)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(11.225)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(11.228)[fs_umount:432] : Attempt 1 of 60 to unmount at Sep 28 15:56:13.000 +epprd_rg:cl_deactivate_fs(11.228)[fs_umount:434] umount /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(11.381)[fs_umount:437] : Unmount of /oracle/EPP/sapdata4 worked. Can stop now. 
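
On success, fs_umount appends a status record of the form "0 <device> <mount point>" to /tmp/epprd_rg_deactivate_fs.tmp and returns 0; a non-zero first field would flag the filesystem as failed. Presumably the file lets the caller collect per-filesystem results, which matters most when the recovery method is parallel and the fs_umount calls run concurrently. An illustrative consumer, not the script's own code:

    # Sketch: aggregate the per-filesystem status records.
    while read rc lv fs ; do
        (( rc != 0 )) && print -u2 "unmount of $fs ($lv) failed, rc=$rc"
    done < /tmp/epprd_rg_deactivate_fs.tmp
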
+epprd_rg:cl_deactivate_fs(11.382)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(11.382)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(11.382)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:14.107337 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:14.107337|INFO: Deactivating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(11.411)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(11.411)[fs_umount:687] print -- 0 /dev/sapdata4lv /oracle/EPP/sapdata4 +epprd_rg:cl_deactivate_fs(11.411)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(11.411)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata3[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata3[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata3[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/sapdata3 cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(11.411)[fs_umount:313] FS=/oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(11.411)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(11.411)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(11.411)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(11.411)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(11.411)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(11.411)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(11.432)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(11.432)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(11.432)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(11.432)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(11.432)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(11.432)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(11.432)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(11.432)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(11.432)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(11.432)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(11.432)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(11.432)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(11.432)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(11.432)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(11.432)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(11.432)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(11.432)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(11.434)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(11.434)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(11.438)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(11.438)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(11.438)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(11.438)[fs_umount:367] lsfs -c /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(11.441)[fs_umount:367] 
lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata3:/dev/sapdata3lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(11.441)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(11.442)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata3:/dev/sapdata3lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(11.443)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(11.444)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(11.444)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(11.445)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(11.445)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(11.445)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(11.447)[fs_umount:394] awk '{ if ( $1 == "/dev/sapdata3lv" ) print $2 }' +epprd_rg:cl_deactivate_fs(11.447)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(11.447)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(11.451)[fs_umount:394] FS_MOUNTED=/oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(11.452)[fs_umount:395] [[ -n /oracle/EPP/sapdata3 ]] +epprd_rg:cl_deactivate_fs(11.452)[fs_umount:397] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:cl_deactivate_fs(11.452)[fs_umount:409] [[ /oracle/EPP/sapdata3 == / ]] +epprd_rg:cl_deactivate_fs(11.452)[fs_umount:409] [[ /oracle/EPP/sapdata3 == /usr ]] +epprd_rg:cl_deactivate_fs(11.452)[fs_umount:409] [[ /oracle/EPP/sapdata3 == /dev ]] +epprd_rg:cl_deactivate_fs(11.452)[fs_umount:409] [[ /oracle/EPP/sapdata3 == /proc ]] +epprd_rg:cl_deactivate_fs(11.452)[fs_umount:409] [[ /oracle/EPP/sapdata3 == /var ]] +epprd_rg:cl_deactivate_fs(11.452)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:14.176810 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:14.176810|INFO: Deactivating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(11.480)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(11.480)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(11.481)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(11.481)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(11.483)[fs_umount:432] : Attempt 1 of 60 to unmount at Sep 28 15:56:14.000 +epprd_rg:cl_deactivate_fs(11.483)[fs_umount:434] umount /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(11.639)[fs_umount:437] : Unmount of /oracle/EPP/sapdata3 worked. Can stop now. 
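
The cycle now repeats unchanged for every entry of LIST_OF_FILE_SYSTEMS_FOR_RG, with PS4_LOOP tagging each trace line with the filesystem being processed. Since RECOVERY_METHOD is sequential, each unmount completes before the next begins; a parallel recovery method would take the other branch of the "[[ sequential == parallel ]]" test and, presumably, background the calls, with the temp file collecting their status. A condensed sketch of the driver loop, with names taken from the trace and the parallel branch an assumption:

    # Sketch: drive fs_umount over the sorted unmount list.
    for fs in $LIST_OF_FILE_SYSTEMS_FOR_RG ; do
        PS4_LOOP=$fs                        # tag trace lines with the current FS
        if [[ $RECOVERY_METHOD == parallel ]] ; then
            fs_umount $fs cl_deactivate_fs epprd_rg_deactivate_fs.tmp &   # assumed
        else
            fs_umount $fs cl_deactivate_fs epprd_rg_deactivate_fs.tmp
        fi
    done
    wait    # reap any backgrounded unmounts
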
+epprd_rg:cl_deactivate_fs(11.639)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(11.639)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(11.639)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:14.363999 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:14.363999|INFO: Deactivating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(11.668)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(11.668)[fs_umount:687] print -- 0 /dev/sapdata3lv /oracle/EPP/sapdata3 +epprd_rg:cl_deactivate_fs(11.668)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(11.668)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata2[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata2[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata2[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/sapdata2 cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(11.668)[fs_umount:313] FS=/oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(11.668)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(11.668)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(11.668)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(11.668)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(11.668)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(11.668)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(11.688)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(11.688)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(11.688)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(11.688)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(11.689)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(11.689)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(11.689)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(11.689)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(11.689)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(11.689)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(11.689)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(11.689)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(11.689)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(11.689)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(11.689)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(11.689)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(11.689)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(11.690)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(11.690)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(11.695)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(11.695)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(11.695)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(11.695)[fs_umount:367] lsfs -c /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(11.698)[fs_umount:367] 
lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata2:/dev/sapdata2lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(11.698)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(11.699)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata2:/dev/sapdata2lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(11.700)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(11.701)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(11.701)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(11.702)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(11.702)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(11.702)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(11.704)[fs_umount:394] awk '{ if ( $1 == "/dev/sapdata2lv" ) print $2 }' +epprd_rg:cl_deactivate_fs(11.704)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(11.704)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(11.708)[fs_umount:394] FS_MOUNTED=/oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(11.708)[fs_umount:395] [[ -n /oracle/EPP/sapdata2 ]] +epprd_rg:cl_deactivate_fs(11.708)[fs_umount:397] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:cl_deactivate_fs(11.708)[fs_umount:409] [[ /oracle/EPP/sapdata2 == / ]] +epprd_rg:cl_deactivate_fs(11.708)[fs_umount:409] [[ /oracle/EPP/sapdata2 == /usr ]] +epprd_rg:cl_deactivate_fs(11.708)[fs_umount:409] [[ /oracle/EPP/sapdata2 == /dev ]] +epprd_rg:cl_deactivate_fs(11.708)[fs_umount:409] [[ /oracle/EPP/sapdata2 == /proc ]] +epprd_rg:cl_deactivate_fs(11.708)[fs_umount:409] [[ /oracle/EPP/sapdata2 == /var ]] +epprd_rg:cl_deactivate_fs(11.708)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:14.433120 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:14.433120|INFO: Deactivating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(11.737)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(11.737)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(11.737)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(11.737)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(11.740)[fs_umount:432] : Attempt 1 of 60 to unmount at Sep 28 15:56:14.000 +epprd_rg:cl_deactivate_fs(11.740)[fs_umount:434] umount /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(11.894)[fs_umount:437] : Unmount of /oracle/EPP/sapdata2 worked. Can stop now. 
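[Editor's note] Before each unmount the script confirms the logical volume is actually mounted where it expects (fs_umount:394-397) and refuses to touch a handful of system mount points (fs_umount:409). A sketch of both checks in the same ksh idiom; can_umount is an invented name, and the guard list is exactly the one compared in the trace:

    can_umount()    # hypothetical; returns 0 only when unmounting looks safe
    {
        typeset lv=$1 fs=$2
        typeset mounted

        # For local entries, AIX mount lists the device in column 1 and the
        # mount point in column 2.
        mounted=$(LC_ALL=C mount | awk -v dev="$lv" '$1 == dev { print $2 }')

        [[ -z $mounted ]] && return 1          # not mounted at all
        [[ $mounted != "$fs" ]] && return 2    # mounted somewhere unexpected

        case $fs in                            # never unmount these
            / | /usr | /dev | /proc | /var) return 3 ;;
        esac
        return 0
    }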
+epprd_rg:cl_deactivate_fs(11.894)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(11.894)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(11.894)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:14.620141 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:14.620141|INFO: Deactivating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(11.924)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(11.924)[fs_umount:687] print -- 0 /dev/sapdata2lv /oracle/EPP/sapdata2 +epprd_rg:cl_deactivate_fs(11.924)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(11.924)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata1[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata1[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/sapdata1[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/sapdata1 cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(11.924)[fs_umount:313] FS=/oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(11.924)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(11.924)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(11.924)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(11.924)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(11.924)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(11.924)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(11.944)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(11.945)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(11.945)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(11.945)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(11.945)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(11.945)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(11.945)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(11.945)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(11.945)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(11.945)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(11.945)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(11.945)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(11.945)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(11.945)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(11.945)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(11.945)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(11.945)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(11.946)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(11.946)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(11.951)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(11.951)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(11.951)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(11.951)[fs_umount:367] lsfs -c /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(11.954)[fs_umount:367] 
lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata1:/dev/sapdata1lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(11.954)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(11.955)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata1:/dev/sapdata1lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(11.956)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(11.957)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(11.957)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(11.958)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(11.958)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(11.958)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(11.960)[fs_umount:394] awk '{ if ( $1 == "/dev/sapdata1lv" ) print $2 }' +epprd_rg:cl_deactivate_fs(11.960)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(11.960)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(11.964)[fs_umount:394] FS_MOUNTED=/oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(11.964)[fs_umount:395] [[ -n /oracle/EPP/sapdata1 ]] +epprd_rg:cl_deactivate_fs(11.964)[fs_umount:397] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:cl_deactivate_fs(11.964)[fs_umount:409] [[ /oracle/EPP/sapdata1 == / ]] +epprd_rg:cl_deactivate_fs(11.964)[fs_umount:409] [[ /oracle/EPP/sapdata1 == /usr ]] +epprd_rg:cl_deactivate_fs(11.964)[fs_umount:409] [[ /oracle/EPP/sapdata1 == /dev ]] +epprd_rg:cl_deactivate_fs(11.964)[fs_umount:409] [[ /oracle/EPP/sapdata1 == /proc ]] +epprd_rg:cl_deactivate_fs(11.964)[fs_umount:409] [[ /oracle/EPP/sapdata1 == /var ]] +epprd_rg:cl_deactivate_fs(11.964)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:14.689466 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:14.689466|INFO: Deactivating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(11.993)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(11.993)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(11.993)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(11.993)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(11.996)[fs_umount:432] : Attempt 1 of 60 to unmount at Sep 28 15:56:14.000 +epprd_rg:cl_deactivate_fs(11.996)[fs_umount:434] umount /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(12.149)[fs_umount:437] : Unmount of /oracle/EPP/sapdata1 worked. Can stop now. 
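[Editor's note] Every unmount in this log succeeds on attempt 1, but the surrounding loop (fs_umount:427-439) budgets up to 60 tries. The shape of that loop, sketched in ksh; the one-second pause between attempts is an assumption, since the trace never reaches a second attempt:

    umount_with_retry()    # hypothetical name for the traced loop
    {
        typeset fs=$1
        typeset -i count rc=1

        for (( count = 1; count <= 60; count++ )); do
            : Attempt $count of 60 to unmount at $(date '+%h %d %H:%M:%S.000')
            if umount "$fs"; then
                rc=0       # unmount worked, can stop now
                break
            fi
            sleep 1        # assumed back-off; not visible in this trace
        done
        return $rc
    }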
+epprd_rg:cl_deactivate_fs(12.149)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(12.149)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(12.149)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:14.874616 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:14.874616|INFO: Deactivating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(12.178)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(12.178)[fs_umount:687] print -- 0 /dev/sapdata1lv /oracle/EPP/sapdata1 +epprd_rg:cl_deactivate_fs(12.178)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(12.178)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogB[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogB[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogB[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/origlogB cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(12.178)[fs_umount:313] FS=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(12.178)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(12.178)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(12.178)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(12.179)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(12.179)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(12.179)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(12.199)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(12.199)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(12.199)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(12.199)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(12.199)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(12.199)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(12.199)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(12.199)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(12.199)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(12.199)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(12.199)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(12.199)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(12.199)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(12.199)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(12.199)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(12.199)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(12.199)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(12.201)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(12.201)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(12.205)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(12.205)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(12.205)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(12.205)[fs_umount:367] lsfs -c /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(12.209)[fs_umount:367] 
lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogB:/dev/origlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(12.209)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(12.210)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogB:/dev/origlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(12.210)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(12.211)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(12.212)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(12.213)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(12.213)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(12.213)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(12.214)[fs_umount:394] awk '{ if ( $1 == "/dev/origlogBlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(12.214)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(12.214)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(12.219)[fs_umount:394] FS_MOUNTED=/oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(12.219)[fs_umount:395] [[ -n /oracle/EPP/origlogB ]] +epprd_rg:cl_deactivate_fs(12.219)[fs_umount:397] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:cl_deactivate_fs(12.219)[fs_umount:409] [[ /oracle/EPP/origlogB == / ]] +epprd_rg:cl_deactivate_fs(12.219)[fs_umount:409] [[ /oracle/EPP/origlogB == /usr ]] +epprd_rg:cl_deactivate_fs(12.219)[fs_umount:409] [[ /oracle/EPP/origlogB == /dev ]] +epprd_rg:cl_deactivate_fs(12.219)[fs_umount:409] [[ /oracle/EPP/origlogB == /proc ]] +epprd_rg:cl_deactivate_fs(12.219)[fs_umount:409] [[ /oracle/EPP/origlogB == /var ]] +epprd_rg:cl_deactivate_fs(12.219)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:14.944007 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:14.944007|INFO: Deactivating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(12.248)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(12.248)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(12.248)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(12.248)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(12.251)[fs_umount:432] : Attempt 1 of 60 to unmount at Sep 28 15:56:14.000 +epprd_rg:cl_deactivate_fs(12.251)[fs_umount:434] umount /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(12.405)[fs_umount:437] : Unmount of /oracle/EPP/origlogB worked. Can stop now. 
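[Editor's note] Each unmount is bracketed by two amlog_trace calls that land one pipe-delimited line apiece in /var/hacmp/availability/clavailability.log. A sketch of that record format; clcycle (log rotation) and cltime (microsecond timestamp) are the PowerHA utilities seen in the trace, and the plain date stand-in below is an assumption that drops the microseconds:

    amlog_trace_sketch()    # hypothetical stand-in for the traced amlog_trace
    {
        typeset msg=$1
        typeset DATE

        DATE=$(date '+%Y-%m-%dT%H:%M:%S')    # cltime also appends .microseconds
        echo "|$DATE|INFO: $msg" >> /var/hacmp/availability/clavailability.log
    }

    amlog_trace_sketch 'Deactivating Filesystem|/oracle/EPP/origlogB'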
+epprd_rg:cl_deactivate_fs(12.405)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(12.405)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(12.405)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:15.131040 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:15.131040|INFO: Deactivating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(12.435)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(12.435)[fs_umount:687] print -- 0 /dev/origlogBlv /oracle/EPP/origlogB +epprd_rg:cl_deactivate_fs(12.435)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(12.435)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogA[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogA[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/origlogA[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/origlogA cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(12.435)[fs_umount:313] FS=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(12.435)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(12.435)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(12.435)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(12.435)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(12.435)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(12.435)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(12.455)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(12.456)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(12.456)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(12.456)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(12.456)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(12.456)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(12.456)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(12.456)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(12.456)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(12.456)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(12.456)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(12.456)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(12.456)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(12.456)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(12.456)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(12.456)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(12.456)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(12.457)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(12.457)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(12.462)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(12.462)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(12.462)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(12.462)[fs_umount:367] lsfs -c /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(12.465)[fs_umount:367] 
lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogA:/dev/origlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(12.465)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(12.466)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogA:/dev/origlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(12.467)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(12.468)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(12.468)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(12.469)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(12.469)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(12.469)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(12.471)[fs_umount:394] awk '{ if ( $1 == "/dev/origlogAlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(12.471)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(12.471)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(12.475)[fs_umount:394] FS_MOUNTED=/oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(12.475)[fs_umount:395] [[ -n /oracle/EPP/origlogA ]] +epprd_rg:cl_deactivate_fs(12.475)[fs_umount:397] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:cl_deactivate_fs(12.476)[fs_umount:409] [[ /oracle/EPP/origlogA == / ]] +epprd_rg:cl_deactivate_fs(12.476)[fs_umount:409] [[ /oracle/EPP/origlogA == /usr ]] +epprd_rg:cl_deactivate_fs(12.476)[fs_umount:409] [[ /oracle/EPP/origlogA == /dev ]] +epprd_rg:cl_deactivate_fs(12.476)[fs_umount:409] [[ /oracle/EPP/origlogA == /proc ]] +epprd_rg:cl_deactivate_fs(12.476)[fs_umount:409] [[ /oracle/EPP/origlogA == /var ]] +epprd_rg:cl_deactivate_fs(12.476)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:15.200743 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:15.200743|INFO: Deactivating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(12.504)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(12.504)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(12.504)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(12.504)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(12.507)[fs_umount:432] : Attempt 1 of 60 to unmount at Sep 28 15:56:15.000 +epprd_rg:cl_deactivate_fs(12.507)[fs_umount:434] umount /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(12.663)[fs_umount:437] : Unmount of /oracle/EPP/origlogA worked. Can stop now. 
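[Editor's note] On success, fs_umount appends a "rc lv mountpoint" record to /tmp/epprd_rg_deactivate_fs.tmp (fs_umount:685-687), one line per filesystem. How the caller consumes that file is not shown in this section; the reader loop below is an assumption about one plausible use, flagging any non-zero return code:

    # Record, exactly as in the trace:
    print -- 0 /dev/origlogAlv /oracle/EPP/origlogA >> /tmp/epprd_rg_deactivate_fs.tmp

    # Assumed consumer: report any filesystem whose unmount failed.
    while read rc lv fs; do
        (( rc != 0 )) && print -u2 "unmount of $fs ($lv) failed, rc=$rc"
    done < /tmp/epprd_rg_deactivate_fs.tmp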
+epprd_rg:cl_deactivate_fs(12.663)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(12.663)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(12.663)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:15.388790 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:15.388790|INFO: Deactivating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(12.692)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(12.692)[fs_umount:687] print -- 0 /dev/origlogAlv /oracle/EPP/origlogA +epprd_rg:cl_deactivate_fs(12.692)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(12.693)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/oraarch[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs:/oracle/EPP/oraarch[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/oraarch[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/oraarch cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(12.693)[fs_umount:313] FS=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(12.693)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(12.693)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(12.693)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(12.693)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(12.693)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(12.693)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(12.713)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(12.713)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(12.713)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(12.713)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(12.713)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(12.713)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(12.713)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(12.713)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(12.713)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(12.713)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(12.713)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(12.713)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(12.713)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(12.713)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(12.713)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(12.713)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(12.713)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(12.715)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(12.715)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(12.720)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(12.720)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(12.720)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(12.720)[fs_umount:367] lsfs -c /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(12.723)[fs_umount:367] 
lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/oraarch:/dev/oraarchlv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(12.723)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(12.724)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/oraarch:/dev/oraarchlv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_deactivate_fs(12.725)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(12.726)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(12.726)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(12.727)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(12.727)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(12.727)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(12.729)[fs_umount:394] awk '{ if ( $1 == "/dev/oraarchlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(12.729)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(12.729)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(12.733)[fs_umount:394] FS_MOUNTED=/oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(12.733)[fs_umount:395] [[ -n /oracle/EPP/oraarch ]] +epprd_rg:cl_deactivate_fs(12.733)[fs_umount:397] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:cl_deactivate_fs(12.733)[fs_umount:409] [[ /oracle/EPP/oraarch == / ]] +epprd_rg:cl_deactivate_fs(12.733)[fs_umount:409] [[ /oracle/EPP/oraarch == /usr ]] +epprd_rg:cl_deactivate_fs(12.733)[fs_umount:409] [[ /oracle/EPP/oraarch == /dev ]] +epprd_rg:cl_deactivate_fs(12.733)[fs_umount:409] [[ /oracle/EPP/oraarch == /proc ]] +epprd_rg:cl_deactivate_fs(12.733)[fs_umount:409] [[ /oracle/EPP/oraarch == /var ]] +epprd_rg:cl_deactivate_fs(12.733)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:15.459247 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:15.459247|INFO: Deactivating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(12.763)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(12.763)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(12.763)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(12.763)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(12.766)[fs_umount:432] : Attempt 1 of 60 to unmount at Sep 28 15:56:15.000 +epprd_rg:cl_deactivate_fs(12.766)[fs_umount:434] umount /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(13.224)[fs_umount:437] : Unmount of /oracle/EPP/oraarch worked. Can stop now. 
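[Editor's note] The clwparroot preamble repeated before every fs_umount boils down to one ODM query: if HACMPresource holds no WPAR_NAME value, the resource group is not WPAR-enabled and clwparroot exits 0 without printing a root path, which is why WPAR_ROOT stays empty throughout this log. The decisive lines (loadWparName:1490), condensed from the trace:

    # Condensed from the traced clwparroot/loadWparName path.
    wparName=$(clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource)
    if [[ -z $wparName ]]; then
        exit 0    # no WPAR configured; caller sees an empty WPAR_ROOT
    fi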
+epprd_rg:cl_deactivate_fs(13.224)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(13.224)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(13.224)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:15.950137 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:15.950137|INFO: Deactivating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(13.254)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(13.254)[fs_umount:687] print -- 0 /dev/oraarchlv /oracle/EPP/oraarch +epprd_rg:cl_deactivate_fs(13.254)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(13.254)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogB[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogB[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogB[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/mirrlogB cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(13.254)[fs_umount:313] FS=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(13.254)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(13.254)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(13.254)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(13.254)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(13.254)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(13.254)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(13.274)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(13.274)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(13.275)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(13.275)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(13.275)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(13.275)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(13.275)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(13.275)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(13.275)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(13.275)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(13.275)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(13.275)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(13.275)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(13.275)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(13.275)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(13.275)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(13.275)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(13.276)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(13.276)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(13.281)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(13.281)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(13.281)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(13.281)[fs_umount:367] lsfs -c /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(13.284)[fs_umount:367] 
lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogB:/dev/mirrlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(13.284)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(13.285)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogB:/dev/mirrlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(13.286)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(13.287)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(13.287)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(13.288)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(13.288)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(13.288)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(13.290)[fs_umount:394] awk '{ if ( $1 == "/dev/mirrlogBlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(13.290)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(13.290)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(13.294)[fs_umount:394] FS_MOUNTED=/oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(13.294)[fs_umount:395] [[ -n /oracle/EPP/mirrlogB ]] +epprd_rg:cl_deactivate_fs(13.294)[fs_umount:397] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:cl_deactivate_fs(13.294)[fs_umount:409] [[ /oracle/EPP/mirrlogB == / ]] +epprd_rg:cl_deactivate_fs(13.294)[fs_umount:409] [[ /oracle/EPP/mirrlogB == /usr ]] +epprd_rg:cl_deactivate_fs(13.294)[fs_umount:409] [[ /oracle/EPP/mirrlogB == /dev ]] +epprd_rg:cl_deactivate_fs(13.294)[fs_umount:409] [[ /oracle/EPP/mirrlogB == /proc ]] +epprd_rg:cl_deactivate_fs(13.294)[fs_umount:409] [[ /oracle/EPP/mirrlogB == /var ]] +epprd_rg:cl_deactivate_fs(13.294)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:16.019450 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:16.019450|INFO: Deactivating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(13.323)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(13.323)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(13.323)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(13.323)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(13.326)[fs_umount:432] : Attempt 1 of 60 to unmount at Sep 28 15:56:16.000 +epprd_rg:cl_deactivate_fs(13.326)[fs_umount:434] umount /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(13.481)[fs_umount:437] : Unmount of /oracle/EPP/mirrlogB worked. Can stop now. 
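[Editor's note] The first thing fs_umount does for each mount point (fs_umount:330-333) is an NFS probe: scan mount output for the entry whose mount point matches and whose VFS column starts with "nfs". On AIX, remote entries carry a leading node column, shifting the mount point to field 3 and the VFS to field 4, which is why this probe indexes different columns than the local-mount check at fs_umount:394. An empty fs_type, as for every filesystem here, means a local jfs2 filesystem and the NFS branch is skipped. The probe, isolated:

    # NFS probe as traced; FILESYS is passed to awk the same way.
    fs_type=$(mount | awk '$3 == FILESYS && $4 ~ "^nfs." { print $4 }' FILESYS=/oracle/EPP/mirrlogB)
    if [[ $fs_type == nfs* ]]; then
        : NFS unmount path    # never taken in this log
    fi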
+epprd_rg:cl_deactivate_fs(13.481)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(13.482)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(13.482)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:16.207438 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:16.207438|INFO: Deactivating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(13.511)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(13.511)[fs_umount:687] print -- 0 /dev/mirrlogBlv /oracle/EPP/mirrlogB +epprd_rg:cl_deactivate_fs(13.511)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(13.511)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogA[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogA[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP/mirrlogA[deactivate_fs_process_resources:770] fs_umount /oracle/EPP/mirrlogA cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(13.511)[fs_umount:313] FS=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(13.511)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(13.511)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(13.511)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(13.511)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(13.511)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(13.511)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(13.532)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(13.532)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(13.532)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(13.532)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(13.532)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(13.532)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(13.532)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(13.532)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(13.532)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(13.532)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(13.532)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(13.532)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(13.532)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(13.532)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(13.532)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(13.532)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(13.532)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(13.534)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(13.534)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(13.538)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(13.538)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(13.538)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(13.538)[fs_umount:367] lsfs -c /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(13.542)[fs_umount:367] 
lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogA:/dev/mirrlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(13.542)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(13.543)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogA:/dev/mirrlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(13.544)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(13.544)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(13.545)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(13.546)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(13.546)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(13.546)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(13.548)[fs_umount:394] awk '{ if ( $1 == "/dev/mirrlogAlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(13.548)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(13.548)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(13.552)[fs_umount:394] FS_MOUNTED=/oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(13.552)[fs_umount:395] [[ -n /oracle/EPP/mirrlogA ]] +epprd_rg:cl_deactivate_fs(13.552)[fs_umount:397] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:cl_deactivate_fs(13.552)[fs_umount:409] [[ /oracle/EPP/mirrlogA == / ]] +epprd_rg:cl_deactivate_fs(13.552)[fs_umount:409] [[ /oracle/EPP/mirrlogA == /usr ]] +epprd_rg:cl_deactivate_fs(13.552)[fs_umount:409] [[ /oracle/EPP/mirrlogA == /dev ]] +epprd_rg:cl_deactivate_fs(13.552)[fs_umount:409] [[ /oracle/EPP/mirrlogA == /proc ]] +epprd_rg:cl_deactivate_fs(13.552)[fs_umount:409] [[ /oracle/EPP/mirrlogA == /var ]] +epprd_rg:cl_deactivate_fs(13.552)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:16.277249 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:16.277249|INFO: Deactivating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(13.581)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(13.581)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(13.581)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(13.581)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(13.584)[fs_umount:432] : Attempt 1 of 60 to unmount at Sep 28 15:56:16.000 +epprd_rg:cl_deactivate_fs(13.584)[fs_umount:434] umount /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(13.737)[fs_umount:437] : Unmount of /oracle/EPP/mirrlogA worked. Can stop now. 
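[Editor's note] This whole section is deactivate_fs_process_resources working through epprd_rg's filesystems one at a time: PS4_LOOP is set so the trace prefix names the current mount point, and the [[ sequential == parallel ]] test shows the recovery method resolved to sequential, so each fs_umount completes before the next begins (in parallel mode the calls would presumably be backgrounded and collected via pidlist, a branch not exercised here). The loop shape, sketched with a stub in place of the real fs_umount traced above and the mount-point list taken from this log:

    fs_umount() { print "would unmount $1"; }    # stub for the traced function

    for fs in /oracle/EPP/sapdata3 /oracle/EPP/sapdata2 /oracle/EPP/sapdata1 \
              /oracle/EPP/origlogB /oracle/EPP/origlogA /oracle/EPP/oraarch \
              /oracle/EPP/mirrlogB /oracle/EPP/mirrlogA /oracle/EPP; do
        PS4_LOOP=$fs    # shows up in the +epprd_rg:cl_deactivate_fs:... prefix
        fs_umount "$fs" cl_deactivate_fs epprd_rg_deactivate_fs.tmp
    done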
+epprd_rg:cl_deactivate_fs(13.737)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(13.738)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(13.738)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:16.463278 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:16.463278|INFO: Deactivating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(13.767)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(13.767)[fs_umount:687] print -- 0 /dev/mirrlogAlv /oracle/EPP/mirrlogA +epprd_rg:cl_deactivate_fs(13.767)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(13.767)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle/EPP[deactivate_fs_process_resources:764] PS4_LOOP=/oracle/EPP +epprd_rg:cl_deactivate_fs:/oracle/EPP[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle/EPP[deactivate_fs_process_resources:770] fs_umount /oracle/EPP cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(13.767)[fs_umount:313] FS=/oracle/EPP +epprd_rg:cl_deactivate_fs(13.767)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(13.767)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(13.767)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(13.767)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(13.767)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(13.767)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(13.788)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(13.788)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(13.788)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(13.788)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(13.788)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(13.788)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(13.788)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(13.788)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(13.788)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(13.788)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(13.788)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(13.788)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(13.788)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(13.788)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(13.788)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(13.788)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(13.788)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(13.790)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle/EPP +epprd_rg:cl_deactivate_fs(13.790)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(13.794)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(13.794)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(13.794)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(13.794)[fs_umount:367] lsfs -c /oracle/EPP +epprd_rg:cl_deactivate_fs(13.797)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP:/dev/epplv:jfs2:::62914560:rw:no:no' 
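lsfs -c emits a colon-delimited header line plus one record per filesystem, so the script peels the device and vfs type out of the last line with a colon-IFS read, exactly as the next trace lines show. A sketch of the same idiom (ksh runs the last stage of a pipeline in the current shell, which is why the variables survive the pipe):

    # Parse 'lsfs -c' output: drop the header, split the record on ':'.
    lsfs -c /oracle/EPP | tail -1 | IFS=: read skip lv fs_type rest
    print -- "$lv $fs_type"     # -> /dev/epplv jfs2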
+epprd_rg:cl_deactivate_fs(13.797)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(13.798)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP:/dev/epplv:jfs2:::62914560:rw:no:no' +epprd_rg:cl_deactivate_fs(13.799)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(13.800)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(13.800)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(13.801)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(13.801)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(13.802)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(13.803)[fs_umount:394] awk '{ if ( $1 == "/dev/epplv" ) print $2 }' +epprd_rg:cl_deactivate_fs(13.803)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(13.803)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(13.808)[fs_umount:394] FS_MOUNTED=/oracle/EPP +epprd_rg:cl_deactivate_fs(13.808)[fs_umount:395] [[ -n /oracle/EPP ]] +epprd_rg:cl_deactivate_fs(13.808)[fs_umount:397] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:cl_deactivate_fs(13.808)[fs_umount:409] [[ /oracle/EPP == / ]] +epprd_rg:cl_deactivate_fs(13.808)[fs_umount:409] [[ /oracle/EPP == /usr ]] +epprd_rg:cl_deactivate_fs(13.808)[fs_umount:409] [[ /oracle/EPP == /dev ]] +epprd_rg:cl_deactivate_fs(13.808)[fs_umount:409] [[ /oracle/EPP == /proc ]] +epprd_rg:cl_deactivate_fs(13.808)[fs_umount:409] [[ /oracle/EPP == /var ]] +epprd_rg:cl_deactivate_fs(13.808)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:16.532869 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:16.532869|INFO: Deactivating Filesystem|/oracle/EPP' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(13.837)[fs_umount:427] : Try up to 60 times to unmount /oracle/EPP +epprd_rg:cl_deactivate_fs(13.837)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(13.837)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(13.837)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(13.839)[fs_umount:432] : Attempt 1 of 60 to unmount at Sep 28 15:56:16.000 +epprd_rg:cl_deactivate_fs(13.839)[fs_umount:434] umount /oracle/EPP +epprd_rg:cl_deactivate_fs(14.993)[fs_umount:437] : Unmount of /oracle/EPP worked. Can stop now. 
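Each side of an unmount is bracketed by amlog_trace, which rotates clavailability.log via clcycle and then appends a single pipe-delimited record. A sketch of the record format as it appears throughout this trace (clcycle's output discarded, as the script does):

    # Append an availability record: |timestamp|severity: action|object
    clcycle clavailability.log > /dev/null 2>&1
    DATE=$(cltime)      # ISO-8601 timestamp, e.g. 2023-09-28T15:56:16.463278
    echo "|$DATE|INFO: Deactivating Filesystem|/oracle/EPP" \
        >> /var/hacmp/availability/clavailability.log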
+epprd_rg:cl_deactivate_fs(14.993)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(14.993)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(14.993)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle/EPP' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:17.718681 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:17.718681|INFO: Deactivating Filesystem|/oracle/EPP' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(15.022)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(15.022)[fs_umount:687] print -- 0 /dev/epplv /oracle/EPP +epprd_rg:cl_deactivate_fs(15.022)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(15.022)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/oracle[deactivate_fs_process_resources:764] PS4_LOOP=/oracle +epprd_rg:cl_deactivate_fs:/oracle[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/oracle[deactivate_fs_process_resources:770] fs_umount /oracle cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(15.023)[fs_umount:313] FS=/oracle +epprd_rg:cl_deactivate_fs(15.023)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(15.023)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(15.023)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(15.023)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(15.023)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(15.023)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(15.043)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(15.043)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(15.043)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(15.043)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(15.043)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(15.043)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(15.043)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(15.043)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(15.043)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(15.043)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(15.043)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(15.043)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(15.043)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(15.043)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(15.043)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(15.043)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(15.043)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(15.045)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/oracle +epprd_rg:cl_deactivate_fs(15.045)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(15.049)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(15.049)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(15.049)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(15.049)[fs_umount:367] lsfs -c /oracle +epprd_rg:cl_deactivate_fs(15.053)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle:/dev/oraclelv:jfs2:::41943040:rw:no:no' 
+epprd_rg:cl_deactivate_fs(15.053)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(15.054)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle:/dev/oraclelv:jfs2:::41943040:rw:no:no' +epprd_rg:cl_deactivate_fs(15.055)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(15.055)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(15.055)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(15.057)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(15.057)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(15.057)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(15.058)[fs_umount:394] awk '{ if ( $1 == "/dev/oraclelv" ) print $2 }' +epprd_rg:cl_deactivate_fs(15.058)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(15.059)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(15.063)[fs_umount:394] FS_MOUNTED=/oracle +epprd_rg:cl_deactivate_fs(15.063)[fs_umount:395] [[ -n /oracle ]] +epprd_rg:cl_deactivate_fs(15.063)[fs_umount:397] [[ /oracle != /oracle ]] +epprd_rg:cl_deactivate_fs(15.063)[fs_umount:409] [[ /oracle == / ]] +epprd_rg:cl_deactivate_fs(15.063)[fs_umount:409] [[ /oracle == /usr ]] +epprd_rg:cl_deactivate_fs(15.063)[fs_umount:409] [[ /oracle == /dev ]] +epprd_rg:cl_deactivate_fs(15.063)[fs_umount:409] [[ /oracle == /proc ]] +epprd_rg:cl_deactivate_fs(15.063)[fs_umount:409] [[ /oracle == /var ]] +epprd_rg:cl_deactivate_fs(15.063)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/oracle' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:17.788018 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:17.788018|INFO: Deactivating Filesystem|/oracle' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(15.092)[fs_umount:427] : Try up to 60 times to unmount /oracle +epprd_rg:cl_deactivate_fs(15.092)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(15.092)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(15.092)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(15.095)[fs_umount:432] : Attempt 1 of 60 to unmount at Sep 28 15:56:17.000 +epprd_rg:cl_deactivate_fs(15.095)[fs_umount:434] umount /oracle +epprd_rg:cl_deactivate_fs(15.382)[fs_umount:437] : Unmount of /oracle worked. Can stop now. 
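Every fs_umount call reports its result by appending "rc device mount-point" to a per-resource-group temp file; after all filesystems are processed, deactivate_fs_process_resources scans that file for failure codes before removing it. A condensed sketch of the handshake, under the reading that records starting with 1 or 11 are the failure markers the trace greps for (both greps come up empty here because every record starts with 0):

    # Producer side: one status record per filesystem.
    print -- 0 /dev/oraclelv /oracle >> /tmp/epprd_rg_deactivate_fs.tmp

    # Consumer side: any record starting with 1 or 11 marks a failed unmount.
    if grep -qw ^1 /tmp/epprd_rg_deactivate_fs.tmp || \
       grep -qw ^11 /tmp/epprd_rg_deactivate_fs.tmp; then
        STATUS=1
    fi
    rm -f /tmp/epprd_rg_deactivate_fs.tmp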
+epprd_rg:cl_deactivate_fs(15.382)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(15.382)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(15.382)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/oracle' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:18.108107 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:18.108107|INFO: Deactivating Filesystem|/oracle' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(15.412)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(15.412)[fs_umount:687] print -- 0 /dev/oraclelv /oracle +epprd_rg:cl_deactivate_fs(15.412)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(15.412)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/board_org[deactivate_fs_process_resources:764] PS4_LOOP=/board_org +epprd_rg:cl_deactivate_fs:/board_org[deactivate_fs_process_resources:765] [[ sequential == parallel ]] +epprd_rg:cl_deactivate_fs:/board_org[deactivate_fs_process_resources:770] fs_umount /board_org cl_deactivate_fs epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(15.412)[fs_umount:313] FS=/board_org +epprd_rg:cl_deactivate_fs(15.412)[fs_umount:313] typeset FS +epprd_rg:cl_deactivate_fs(15.412)[fs_umount:314] PROGNAME=cl_deactivate_fs +epprd_rg:cl_deactivate_fs(15.412)[fs_umount:314] typeset PROGNAME +epprd_rg:cl_deactivate_fs(15.412)[fs_umount:315] TMP_FILENAME=epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(15.412)[fs_umount:315] typeset TMP_FILENAME +epprd_rg:cl_deactivate_fs(15.412)[fs_umount:316] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_deactivate_fs(15.432)[fs_umount:316] WPAR_ROOT='' +epprd_rg:cl_deactivate_fs(15.432)[fs_umount:316] typeset WPAR_ROOT +epprd_rg:cl_deactivate_fs(15.433)[fs_umount:317] STATUS=0 +epprd_rg:cl_deactivate_fs(15.433)[fs_umount:317] typeset -li STATUS +epprd_rg:cl_deactivate_fs(15.433)[fs_umount:318] typeset lv +epprd_rg:cl_deactivate_fs(15.433)[fs_umount:319] typeset fs_type +epprd_rg:cl_deactivate_fs(15.433)[fs_umount:320] typeset count +epprd_rg:cl_deactivate_fs(15.433)[fs_umount:321] typeset line +epprd_rg:cl_deactivate_fs(15.433)[fs_umount:322] RC=0 +epprd_rg:cl_deactivate_fs(15.433)[fs_umount:322] typeset -li RC +epprd_rg:cl_deactivate_fs(15.433)[fs_umount:323] typeset pid +epprd_rg:cl_deactivate_fs(15.433)[fs_umount:324] typeset pidlist +epprd_rg:cl_deactivate_fs(15.433)[fs_umount:325] typeset lv_lsfs +epprd_rg:cl_deactivate_fs(15.433)[fs_umount:326] disable_procfile_debug=false +epprd_rg:cl_deactivate_fs(15.433)[fs_umount:326] typeset disable_procfile_debug +epprd_rg:cl_deactivate_fs(15.433)[fs_umount:327] typeset crossmount_rg +epprd_rg:cl_deactivate_fs(15.433)[fs_umount:330] : Fetch filesystem type and unmount nfs filesystem +epprd_rg:cl_deactivate_fs(15.434)[fs_umount:332] awk '$3==FILESYS && $4~"^nfs."{print $4}' FILESYS=/board_org +epprd_rg:cl_deactivate_fs(15.434)[fs_umount:332] mount +epprd_rg:cl_deactivate_fs(15.439)[fs_umount:332] fs_type='' +epprd_rg:cl_deactivate_fs(15.439)[fs_umount:333] [[ '' == nfs* ]] +epprd_rg:cl_deactivate_fs(15.439)[fs_umount:365] : Get the logical volume associated with the filesystem +epprd_rg:cl_deactivate_fs(15.439)[fs_umount:367] lsfs -c /board_org +epprd_rg:cl_deactivate_fs(15.442)[fs_umount:367] lv_lsfs=$'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' 
+epprd_rg:cl_deactivate_fs(15.442)[fs_umount:382] : Get the logical volume name and filesystem type +epprd_rg:cl_deactivate_fs(15.443)[fs_umount:384] print $'#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_deactivate_fs(15.444)[fs_umount:384] tail -1 +epprd_rg:cl_deactivate_fs(15.445)[fs_umount:384] read skip lv fs_type rest +epprd_rg:cl_deactivate_fs(15.445)[fs_umount:384] IFS=: +epprd_rg:cl_deactivate_fs(15.446)[fs_umount:387] : For WPARs, find the real file system name +epprd_rg:cl_deactivate_fs(15.446)[fs_umount:389] [[ -n '' ]] +epprd_rg:cl_deactivate_fs(15.446)[fs_umount:392] : Check to see if filesystem is mounted. +epprd_rg:cl_deactivate_fs(15.448)[fs_umount:394] awk '{ if ( $1 == "/dev/boardlv" ) print $2 }' +epprd_rg:cl_deactivate_fs(15.448)[fs_umount:394] mount +epprd_rg:cl_deactivate_fs(15.448)[fs_umount:394] LC_ALL=C +epprd_rg:cl_deactivate_fs(15.452)[fs_umount:394] FS_MOUNTED=/board_org +epprd_rg:cl_deactivate_fs(15.452)[fs_umount:395] [[ -n /board_org ]] +epprd_rg:cl_deactivate_fs(15.452)[fs_umount:397] [[ /board_org != /board_org ]] +epprd_rg:cl_deactivate_fs(15.452)[fs_umount:409] [[ /board_org == / ]] +epprd_rg:cl_deactivate_fs(15.452)[fs_umount:409] [[ /board_org == /usr ]] +epprd_rg:cl_deactivate_fs(15.452)[fs_umount:409] [[ /board_org == /dev ]] +epprd_rg:cl_deactivate_fs(15.452)[fs_umount:409] [[ /board_org == /proc ]] +epprd_rg:cl_deactivate_fs(15.452)[fs_umount:409] [[ /board_org == /var ]] +epprd_rg:cl_deactivate_fs(15.452)[fs_umount:425] amlog_trace '' 'Deactivating Filesystem|/board_org' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:18.177868 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:18.177868|INFO: Deactivating Filesystem|/board_org' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(15.481)[fs_umount:427] : Try up to 60 times to unmount /board_org +epprd_rg:cl_deactivate_fs(15.482)[fs_umount:429] (( count=1)) +epprd_rg:cl_deactivate_fs(15.482)[fs_umount:429] (( count <= 60)) +epprd_rg:cl_deactivate_fs(15.482)[fs_umount:432] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_deactivate_fs(15.484)[fs_umount:432] : Attempt 1 of 60 to unmount at Sep 28 15:56:18.000 +epprd_rg:cl_deactivate_fs(15.484)[fs_umount:434] umount /board_org +epprd_rg:cl_deactivate_fs(15.722)[fs_umount:437] : Unmount of /board_org worked. Can stop now. 
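Before each unmount, fs_umount shells out to clwparroot to ask whether the resource group lives in a WPAR; the probe re-sources the WPAR utility libraries every time, queries the ODM for a WPAR_NAME resource, and exits empty-handed on this cluster, so WPAR_ROOT stays '' and mount points are used unmodified. The decision reduces to one ODM lookup, sketched here:

    # No WPAR_NAME resource in HACMPresource => resource group is not WPAR'd.
    wparName=$(clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource)
    [[ -z $wparName ]] && exit 0    # caller sees an empty WPAR_ROOT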
+epprd_rg:cl_deactivate_fs(15.722)[fs_umount:439] break +epprd_rg:cl_deactivate_fs(15.723)[fs_umount:672] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_fs(15.723)[fs_umount:676] amlog_trace '' 'Deactivating Filesystem|/board_org' +epprd_rg:cl_deactivate_fs[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_fs[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_fs[amlog_trace:319] cltime +epprd_rg:cl_deactivate_fs[amlog_trace:319] DATE=2023-09-28T15:56:18.448102 +epprd_rg:cl_deactivate_fs[amlog_trace:320] echo '|2023-09-28T15:56:18.448102|INFO: Deactivating Filesystem|/board_org' +epprd_rg:cl_deactivate_fs[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_fs(15.752)[fs_umount:685] : append status to the status file +epprd_rg:cl_deactivate_fs(15.752)[fs_umount:687] print -- 0 /dev/boardlv /board_org +epprd_rg:cl_deactivate_fs(15.752)[fs_umount:687] 1>> /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs(15.752)[fs_umount:691] return 0 +epprd_rg:cl_deactivate_fs:/board_org[deactivate_fs_process_resources:773] unset PS4_LOOP +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:777] [[ -n '' ]] +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:786] ALLNOERROR=All_non_error_filesystems +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:788] : update resource manager +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:790] cl_RMupdate resource_down All_non_error_filesystems cl_deactivate_fs 2023-09-28T15:56:18.471383 2023-09-28T15:56:18.475808 +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:794] : Check to see how the unmounts went +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:796] [[ -s /tmp/epprd_rg_deactivate_fs.tmp ]] +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:798] grep -qw ^1 /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:805] grep -qw ^11 /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:814] : All unmounts successful +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:816] STATUS=0 +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:817] rm -f /tmp/epprd_rg_deactivate_fs.tmp +epprd_rg:cl_deactivate_fs[deactivate_fs_process_resources:821] return 0 +epprd_rg:cl_deactivate_fs[924] exit 0 +epprd_rg:process_resources[process_file_systems:2668] RC=0 +epprd_rg:process_resources[process_file_systems:2669] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[process_file_systems:2681] (( 0 != 0 )) +epprd_rg:process_resources[process_file_systems:2687] return 0 +epprd_rg:process_resources[3483] RC=0 +epprd_rg:process_resources[3485] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3487] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-09-28T15:56:18.498338 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=VGS ACTION=RELEASE VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' EXPORT_FILESYSTEM='"TRUE"' +epprd_rg:process_resources[1] JOB_TYPE=VGS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] VOLUME_GROUPS=datavg 
+epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] EXPORT_FILESYSTEM=TRUE +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ VGS == RELEASE ]] +epprd_rg:process_resources[3360] [[ VGS == ONLINE ]] +epprd_rg:process_resources[3571] process_volume_groups_main RELEASE +epprd_rg:process_resources[process_volume_groups_main:2293] PS4_FUNC=process_volume_groups_main +epprd_rg:process_resources[process_volume_groups_main:2293] typeset PS4_FUNC +epprd_rg:process_resources[process_volume_groups_main:2294] [[ high == high ]] +epprd_rg:process_resources[process_volume_groups_main:2294] set -x +epprd_rg:process_resources[process_volume_groups_main:2295] DEF_VARYON_ACTION=0 +epprd_rg:process_resources[process_volume_groups_main:2295] typeset -li DEF_VARYON_ACTION +epprd_rg:process_resources[process_volume_groups_main:2296] FAILURE_IN_METHOD=0 +epprd_rg:process_resources[process_volume_groups_main:2296] typeset -li FAILURE_IN_METHOD +epprd_rg:process_resources[process_volume_groups_main:2297] ACTION=RELEASE +epprd_rg:process_resources[process_volume_groups_main:2297] typeset ACTION +epprd_rg:process_resources[process_volume_groups_main:2298] STAT=0 +epprd_rg:process_resources[process_volume_groups_main:2299] VG_LIST=datavg +epprd_rg:process_resources[process_volume_groups_main:2300] RG_LIST=epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2304] getReplicatedResources epprd_rg +epprd_rg:process_resources[getReplicatedResources:699] PS4_FUNC=getReplicatedResources +epprd_rg:process_resources[getReplicatedResources:699] typeset PS4_FUNC +epprd_rg:process_resources[getReplicatedResources:700] [[ high == high ]] +epprd_rg:process_resources[getReplicatedResources:700] set -x +epprd_rg:process_resources[getReplicatedResources:702] RV=false +epprd_rg:process_resources[getReplicatedResources:704] clodmget -n -f type HACMPrresmethods +epprd_rg:process_resources[getReplicatedResources:704] [[ -n 9 ]] +epprd_rg:process_resources[getReplicatedResources:707] : Replicated resource methods are defined, check for resources +epprd_rg:process_resources[getReplicatedResources:709] clodmget -q $'name like \'*_REP_RESOURCE\' AND group=epprd_rg' -f value -n HACMPresource +epprd_rg:process_resources[getReplicatedResources:709] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:718] : Verify if any backup profiles are configured and trigger cbm utilities based on that +epprd_rg:process_resources[getReplicatedResources:720] clodmget -q name=BACKUP_ENABLED -f value HACMPresource +epprd_rg:process_resources[getReplicatedResources:720] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:739] echo false +epprd_rg:process_resources[process_volume_groups_main:2304] REPLICATED_RESOURCES=false +epprd_rg:process_resources[process_volume_groups_main:2305] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2306] print -- datavg +epprd_rg:process_resources[process_volume_groups_main:2306] read VOLUME_GROUPS VG_LIST +epprd_rg:process_resources[process_volume_groups_main:2306] IFS=: +epprd_rg:process_resources[process_volume_groups_main:2307] VOLUME_GROUPS=datavg 
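process_resources is a dispatcher: each trip through its main loop asks clRGPA for the next job, imports the returned shell assignments with allexport set, and then peels the colon-separated per-resource-group lists apart. A sketch of the idiom as it effectively appears above (the eval-of-command-output form is an inference from the trace; the assignments shown are the VGS job from this log):

    # Ask the resource group policy agent what to do next and import it.
    set -a
    eval $(clRGPA)    # e.g. JOB_TYPE=VGS ACTION=RELEASE VOLUME_GROUPS="datavg"
    set +a

    # Colon-separated lists carry one field per resource group.
    print -- $VOLUME_GROUPS | IFS=: read VOLUME_GROUPS VG_LIST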
+epprd_rg:process_resources[process_volume_groups_main:2310] : At this point, these variables contain information only for epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2312] export VOLUME_GROUPS +epprd_rg:process_resources[process_volume_groups_main:2313] export RESOURCE_GROUPS +epprd_rg:process_resources[process_volume_groups_main:2315] [[ false == true ]] +epprd_rg:process_resources[process_volume_groups_main:2555] process_volume_groups RELEASE +epprd_rg:process_resources[process_volume_groups:2571] PS4_FUNC=process_volume_groups +epprd_rg:process_resources[process_volume_groups:2571] typeset PS4_FUNC +epprd_rg:process_resources[process_volume_groups:2572] [[ high == high ]] +epprd_rg:process_resources[process_volume_groups:2572] set -x +epprd_rg:process_resources[process_volume_groups:2573] STAT=0 +epprd_rg:process_resources[process_volume_groups:2575] GROUPNAME=epprd_rg +epprd_rg:process_resources[process_volume_groups:2575] export GROUPNAME +epprd_rg:process_resources[process_volume_groups:2578] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[process_volume_groups:2603] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[process_volume_groups:2605] cl_deactivate_vgs -n +epprd_rg:cl_deactivate_vgs[458] version=%I% +epprd_rg:cl_deactivate_vgs[461] STATUS=0 +epprd_rg:cl_deactivate_vgs[461] typeset -li STATUS +epprd_rg:cl_deactivate_vgs[462] TMP_VARYOFF_STATUS=/tmp/_deactivate_vgs.tmp +epprd_rg:cl_deactivate_vgs[463] sddsrv_off=FALSE +epprd_rg:cl_deactivate_vgs[464] ALLVGS=All_volume_groups +epprd_rg:cl_deactivate_vgs[465] OEM_CALL=false +epprd_rg:cl_deactivate_vgs[467] (( 1 != 0 )) +epprd_rg:cl_deactivate_vgs[467] [[ -n == -c ]] +epprd_rg:cl_deactivate_vgs[476] EVENT_TYPE=RELEASE_PRIMARY +epprd_rg:cl_deactivate_vgs[477] EVENT_TYPE=RELEASE_PRIMARY +epprd_rg:cl_deactivate_vgs[480] : if JOB_TYPE is set and is not $'\'GROUP\',' then process_resources is parent +epprd_rg:cl_deactivate_vgs[482] [[ VGS != 0 ]] +epprd_rg:cl_deactivate_vgs[482] [[ VGS != GROUP ]] +epprd_rg:cl_deactivate_vgs[485] : parameters passed from process_resources thru environment +epprd_rg:cl_deactivate_vgs[487] PROC_RES=true +epprd_rg:cl_deactivate_vgs[501] : set -u will report an error if any variable used in the script is not set +epprd_rg:cl_deactivate_vgs[503] set -u +epprd_rg:cl_deactivate_vgs[506] : Remove the status file if it currently exists +epprd_rg:cl_deactivate_vgs[508] rm -f /tmp/_deactivate_vgs.tmp +epprd_rg:cl_deactivate_vgs[511] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_deactivate_vgs[512] : to allow reliable comparisons. 
E.g., maximum VRMF is +epprd_rg:cl_deactivate_vgs[513] : 99.99.999.999 +epprd_rg:cl_deactivate_vgs[515] typeset -li V R M F +epprd_rg:cl_deactivate_vgs[516] typeset -Z2 R +epprd_rg:cl_deactivate_vgs[517] typeset -Z3 M +epprd_rg:cl_deactivate_vgs[518] typeset -Z3 F +epprd_rg:cl_deactivate_vgs[519] VRMF=0 +epprd_rg:cl_deactivate_vgs[519] typeset -li VRMF +epprd_rg:cl_deactivate_vgs[528] ls '/dev/vpath*' +epprd_rg:cl_deactivate_vgs[528] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_vgs[595] : Special processing for 2-node NFS clusters +epprd_rg:cl_deactivate_vgs[597] TWO_NODE_CLUSTER=FALSE +epprd_rg:cl_deactivate_vgs[597] export TWO_NODE_CLUSTER +epprd_rg:cl_deactivate_vgs[598] FS_TYPES='jsf2?log' +epprd_rg:cl_deactivate_vgs[598] export FS_TYPES +epprd_rg:cl_deactivate_vgs[599] wc -l +epprd_rg:cl_deactivate_vgs[599] clodmget -q 'object = VERBOSE_LOGGING' -f name -n HACMPnode +epprd_rg:cl_deactivate_vgs[599] (( 2 == 2 )) +epprd_rg:cl_deactivate_vgs[600] [[ -n TRUE ]] +epprd_rg:cl_deactivate_vgs[602] : two nodes, with exported filesystems +epprd_rg:cl_deactivate_vgs[603] TWO_NODE_CLUSTER=TRUE +epprd_rg:cl_deactivate_vgs[603] export TWO_NODE_CLUSTER +epprd_rg:cl_deactivate_vgs[607] : Pick up a list of currently varied on volume groups +epprd_rg:cl_deactivate_vgs[609] lsvg -L -o +epprd_rg:cl_deactivate_vgs[609] 2> /tmp/lsvg.err +epprd_rg:cl_deactivate_vgs[609] VG_ON_LIST=$'datavg\ncaavg_private\nrootvg' +epprd_rg:cl_deactivate_vgs[612] : if not called from process_resources, use old-style environment and parameters +epprd_rg:cl_deactivate_vgs[614] [[ true == false ]] +epprd_rg:cl_deactivate_vgs[672] : Called from process_resources +epprd_rg:cl_deactivate_vgs[674] LIST_OF_VOLUME_GROUPS_FOR_RG='' +epprd_rg:cl_deactivate_vgs[679] export GROUPNAME +epprd_rg:cl_deactivate_vgs[681] : Discover the volume groups for this resource group. +epprd_rg:cl_deactivate_vgs[686] echo datavg +epprd_rg:cl_deactivate_vgs[686] read LIST_OF_VOLUME_GROUPS_FOR_RG VOLUME_GROUPS +epprd_rg:cl_deactivate_vgs[686] IFS=: +epprd_rg:cl_deactivate_vgs[689] : Reverse the order, so that VGs release in reverse order of acquisition +epprd_rg:cl_deactivate_vgs[693] sed 's/ /,/g' +epprd_rg:cl_deactivate_vgs[693] echo datavg +epprd_rg:cl_deactivate_vgs[693] LIST_OF_COMMASEP_VG_FOR_RG=datavg +epprd_rg:cl_deactivate_vgs[694] echo datavg +epprd_rg:cl_deactivate_vgs[695] tr , '\n' +epprd_rg:cl_deactivate_vgs[695] egrep -v -w $'rootvg|caavg_private\n |altinst_rootvg|old_rootvg' +epprd_rg:cl_deactivate_vgs[696] sort -ru +epprd_rg:cl_deactivate_vgs[694] LIST_OF_VOLUME_GROUPS_FOR_RG=datavg +epprd_rg:cl_deactivate_vgs[698] : Update Resource Manager - releasing VGs for this RG +epprd_rg:cl_deactivate_vgs[700] cl_RMupdate resource_releasing All_volume_groups cl_deactivate_vgs 2023-09-28T15:56:18.587787 2023-09-28T15:56:18.592343 +epprd_rg:cl_deactivate_vgs[703] : Process the volume groups for this resource group +epprd_rg:cl_deactivate_vgs:datavg[707] PS4_LOOP=datavg +epprd_rg:cl_deactivate_vgs:datavg[711] print datavg caavg_private rootvg +epprd_rg:cl_deactivate_vgs:datavg[711] grep -qw datavg +epprd_rg:cl_deactivate_vgs:datavg[719] : The VG is varied on, so go vary it off.
Get the VG mode first +epprd_rg:cl_deactivate_vgs:datavg[721] MODE=9999 +epprd_rg:cl_deactivate_vgs:datavg[722] /usr/sbin/getlvodm -v datavg +epprd_rg:cl_deactivate_vgs:datavg[722] VGID=00c44af100004b00000001851e9dc053 +epprd_rg:cl_deactivate_vgs:datavg[723] lqueryvg -g 00c44af100004b00000001851e9dc053 -X +epprd_rg:cl_deactivate_vgs:datavg[723] MODE=32 +epprd_rg:cl_deactivate_vgs:datavg[724] RC=0 +epprd_rg:cl_deactivate_vgs:datavg[725] (( 0 != 0 )) +epprd_rg:cl_deactivate_vgs:datavg[726] : exit status of lqueryvg -g 00c44af100004b00000001851e9dc053 -X: 0 +epprd_rg:cl_deactivate_vgs:datavg[728] vgs_varyoff datavg 32 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:60] PS4_TIMER=true +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:60] typeset PS4_TIMER +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:61] [[ high == high ]] +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:61] set -x +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:63] VG=datavg +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:63] typeset VG +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:64] MODE=32 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:64] typeset MODE +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:66] OPEN_FSs='' +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:66] typeset OPEN_FSs +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:67] OPEN_LVs='' +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:67] typeset OPEN_LVs +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:68] typeset TMP_VG_LIST +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:69] TS_FLAGS='' +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:69] typeset TS_FLAGS +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:71] STATUS=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:71] typeset -li STATUS +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:72] RC=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:72] typeset -li RC +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:73] SELECTIVE_FAILOVER=false +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:73] typeset SELECTIVE_FAILOVER +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:74] typeset LV +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:75] lv_list='' +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:75] typeset lv_list +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:76] typeset FS +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:77] FS_MOUNTED='' +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:77] typeset FS_MOUNTED +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:79] rc_fuser=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:79] typeset -li rc_fuser +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:80] rc_varyonvg=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:80] typeset -li rc_varyonvg +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:81] rc_varyoffvg=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:81] typeset -li rc_varyoffvg +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:82] rc_lsvg=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:82] typeset -li rc_lsvg +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:83] rc_dfs=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:83] typeset -li rc_dfs +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:84] rc_dvg=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:84] typeset -li rc_dvg +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:88] typeset -li FV FR FM FF 
+epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:89] typeset -Z2 FR +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:90] typeset -Z3 FM +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:91] typeset -Z3 FF +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:92] FVRMF=0 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:92] typeset -li FVRMF +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:93] fuser_lvl=601004000 +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:93] typeset -li fuser_lvl +epprd_rg:cl_deactivate_vgs(0.094):datavg[vgs_varyoff:95] lsvg -l -L datavg +epprd_rg:cl_deactivate_vgs(0.095):datavg[vgs_varyoff:95] 2> /dev/null +epprd_rg:cl_deactivate_vgs(0.115):datavg[vgs_varyoff:95] TMP_VG_LIST=$'datavg:\nLV NAME TYPE LPs PPs PVs LV STATE MOUNT POINT\nepprdaloglv jfs2log 1 1 1 closed/syncd N/A\nsaplv jfs2 100 100 7 closed/syncd /usr/sap\nsapmntlv jfs2 20 20 7 closed/syncd /sapmnt\noraclelv jfs2 40 40 7 closed/syncd /oracle\nepplv jfs2 60 60 7 closed/syncd /oracle/EPP\noraarchlv jfs2 100 100 7 closed/syncd /oracle/EPP/oraarch\nsapdata1lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata1\nsapdata2lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata2\nsapdata3lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata3\nsapdata4lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata4\nboardlv jfs2 10 10 7 closed/syncd /board_org\noriglogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogA\noriglogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogB\nmirrlogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogA\nmirrlogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_vgs(0.115):datavg[vgs_varyoff:96] rc_lsvg=0 +epprd_rg:cl_deactivate_vgs(0.115):datavg[vgs_varyoff:98] [[ RELEASE_PRIMARY == reconfig* ]] +epprd_rg:cl_deactivate_vgs(0.115):datavg[vgs_varyoff:114] [[ -n $'datavg:\nLV NAME TYPE LPs PPs PVs LV STATE MOUNT POINT\nepprdaloglv jfs2log 1 1 1 closed/syncd N/A\nsaplv jfs2 100 100 7 closed/syncd /usr/sap\nsapmntlv jfs2 20 20 7 closed/syncd /sapmnt\noraclelv jfs2 40 40 7 closed/syncd /oracle\nepplv jfs2 60 60 7 closed/syncd /oracle/EPP\noraarchlv jfs2 100 100 7 closed/syncd /oracle/EPP/oraarch\nsapdata1lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata1\nsapdata2lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata2\nsapdata3lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata3\nsapdata4lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata4\nboardlv jfs2 10 10 7 closed/syncd /board_org\noriglogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogA\noriglogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogB\nmirrlogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogA\nmirrlogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogB' ]] +epprd_rg:cl_deactivate_vgs(0.115):datavg[vgs_varyoff:117] : Get list of open logical volumes corresponding to filesystems +epprd_rg:cl_deactivate_vgs(0.117):datavg[vgs_varyoff:119] awk '$2 ~ /jfs2?$/ && $6 ~ /open/ {print $1}' +epprd_rg:cl_deactivate_vgs(0.117):datavg[vgs_varyoff:119] print $'datavg:\nLV NAME TYPE LPs PPs PVs LV STATE MOUNT POINT\nepprdaloglv jfs2log 1 1 1 closed/syncd N/A\nsaplv jfs2 100 100 7 closed/syncd /usr/sap\nsapmntlv jfs2 20 20 7 closed/syncd /sapmnt\noraclelv jfs2 40 40 7 closed/syncd /oracle\nepplv jfs2 60 60 7 closed/syncd /oracle/EPP\noraarchlv jfs2 100 100 7 closed/syncd /oracle/EPP/oraarch\nsapdata1lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata1\nsapdata2lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata2\nsapdata3lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata3\nsapdata4lv jfs2 100 100 7 closed/syncd 
/oracle/EPP/sapdata4\nboardlv jfs2 10 10 7 closed/syncd /board_org\noriglogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogA\noriglogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogB\nmirrlogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogA\nmirrlogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_vgs(0.121):datavg[vgs_varyoff:119] OPEN_LVs='' +epprd_rg:cl_deactivate_vgs(0.121):datavg[vgs_varyoff:122] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_vgs(0.121):datavg[vgs_varyoff:140] [[ -n '' ]] +epprd_rg:cl_deactivate_vgs(0.121):datavg[vgs_varyoff:167] [[ TRUE == TRUE ]] +epprd_rg:cl_deactivate_vgs(0.121):datavg[vgs_varyoff:170] : For two-node clusters, special processing for the highly available NFS +epprd_rg:cl_deactivate_vgs(0.121):datavg[vgs_varyoff:171] : server function: tell NFS to dump the dup cache into the jfslog or jfs2log +epprd_rg:cl_deactivate_vgs(0.121):datavg[vgs_varyoff:175] : Find the first log device in the saved list of logical volumes +epprd_rg:cl_deactivate_vgs(0.121):datavg[vgs_varyoff:177] pattern='jsf2?log' +epprd_rg:cl_deactivate_vgs(0.122):datavg[vgs_varyoff:178] awk '$2 ~ /jsf2?log/ {printf "/dev/%s\n", $1 ; exit}' +epprd_rg:cl_deactivate_vgs(0.122):datavg[vgs_varyoff:178] print $'datavg:\nLV NAME TYPE LPs PPs PVs LV STATE MOUNT POINT\nepprdaloglv jfs2log 1 1 1 closed/syncd N/A\nsaplv jfs2 100 100 7 closed/syncd /usr/sap\nsapmntlv jfs2 20 20 7 closed/syncd /sapmnt\noraclelv jfs2 40 40 7 closed/syncd /oracle\nepplv jfs2 60 60 7 closed/syncd /oracle/EPP\noraarchlv jfs2 100 100 7 closed/syncd /oracle/EPP/oraarch\nsapdata1lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata1\nsapdata2lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata2\nsapdata3lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata3\nsapdata4lv jfs2 100 100 7 closed/syncd /oracle/EPP/sapdata4\nboardlv jfs2 10 10 7 closed/syncd /board_org\noriglogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogA\noriglogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/origlogB\nmirrlogAlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogA\nmirrlogBlv jfs2 10 10 7 closed/syncd /oracle/EPP/mirrlogB' +epprd_rg:cl_deactivate_vgs(0.126):datavg[vgs_varyoff:178] logdev='' +epprd_rg:cl_deactivate_vgs(0.126):datavg[vgs_varyoff:180] [[ -z '' ]] +epprd_rg:cl_deactivate_vgs(0.126):datavg[vgs_varyoff:181] [[ true == true ]] +epprd_rg:cl_deactivate_vgs(0.126):datavg[vgs_varyoff:182] [[ ONLINE != ONLINE ]] +epprd_rg:cl_deactivate_vgs(0.126):datavg[vgs_varyoff:216] [[ -n '' ]] +epprd_rg:cl_deactivate_vgs(0.126):datavg[vgs_varyoff:223] : Finally, vary off the volume group +epprd_rg:cl_deactivate_vgs(0.126):datavg[vgs_varyoff:226] amlog_trace '' 'Deactivating Volume Group|datavg' +epprd_rg:cl_deactivate_vgs(0.126):datavg[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_vgs(0.127):datavg[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_vgs(0.152):datavg[amlog_trace:319] cltime +epprd_rg:cl_deactivate_vgs(0.154):datavg[amlog_trace:319] DATE=2023-09-28T15:56:18.679588 +epprd_rg:cl_deactivate_vgs(0.154):datavg[amlog_trace:320] echo '|2023-09-28T15:56:18.679588|INFO: Deactivating Volume Group|datavg' +epprd_rg:cl_deactivate_vgs(0.155):datavg[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_vgs(0.155):datavg[vgs_varyoff:228] [[ 32 == 32 ]] +epprd_rg:cl_deactivate_vgs(0.155):datavg[vgs_varyoff:231] : This VG is ECM. Move to passive mode. 
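Two things are worth noting here. First, the NFS dup-cache flush just above was skipped: the awk pattern carried in FS_TYPES, 'jsf2?log', transposes 'jfs' and can never match the actual type 'jfs2log' of epprdaloglv, so logdev comes back empty and the dump step falls through. Second, because lqueryvg reported mode 32, datavg is an Enhanced Concurrent Mode volume group, and "varying it off" on this node actually means dropping from active to passive mode, which keeps the VG known to the node while blocking normal open and write access. A sketch of the transition the trace performs next:

    # ECM release: move the VG to passive mode instead of a full varyoffvg.
    # -c concurrent, -n skip sync of stale partitions, -P passive varyon.
    varyonvg -c -n -P datavg 2> /dev/null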
+epprd_rg:cl_deactivate_vgs(0.155):datavg[vgs_varyoff:244] TS_FLAGS=-o +epprd_rg:cl_deactivate_vgs(0.155):datavg[vgs_varyoff:245] cltime 2023-09-28T15:56:18.682333 +epprd_rg:cl_deactivate_vgs(0.157):datavg[vgs_varyoff:246] varyonvg -c -n -P datavg +epprd_rg:cl_deactivate_vgs(0.158):datavg[vgs_varyoff:246] 2> /dev/null +epprd_rg:cl_deactivate_vgs(0.291):datavg[vgs_varyoff:247] rc_varyonvg=0 +epprd_rg:cl_deactivate_vgs(0.291):datavg[vgs_varyoff:248] : return code from varyonvg -c -n -P datavg is 0 +epprd_rg:cl_deactivate_vgs(0.291):datavg[vgs_varyoff:249] cltime 2023-09-28T15:56:18.818542 +epprd_rg:cl_deactivate_vgs(0.293):datavg[vgs_varyoff:250] (( 0 != 0 )) +epprd_rg:cl_deactivate_vgs(0.294):datavg[vgs_varyoff:277] [[ 0 != 0 ]] +epprd_rg:cl_deactivate_vgs(0.294):datavg[vgs_varyoff:281] amlog_trace '' 'Deactivating Volume Group|datavg' +epprd_rg:cl_deactivate_vgs(0.294):datavg[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_deactivate_vgs(0.294):datavg[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_deactivate_vgs(0.319):datavg[amlog_trace:319] cltime +epprd_rg:cl_deactivate_vgs(0.321):datavg[amlog_trace:319] DATE=2023-09-28T15:56:18.846624 +epprd_rg:cl_deactivate_vgs(0.322):datavg[amlog_trace:320] echo '|2023-09-28T15:56:18.846624|INFO: Deactivating Volume Group|datavg' +epprd_rg:cl_deactivate_vgs(0.322):datavg[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_deactivate_vgs(0.322):datavg[vgs_varyoff:284] RC=0 +epprd_rg:cl_deactivate_vgs(0.322):datavg[vgs_varyoff:287] : Update LVM volume group timestamps in ODM +epprd_rg:cl_deactivate_vgs(0.322):datavg[vgs_varyoff:289] cl_update_vg_odm_ts -o datavg +epprd_rg:cl_update_vg_odm_ts(0.001)[77] version=1.13 +epprd_rg:cl_update_vg_odm_ts(0.001)[121] o_flag='' +epprd_rg:cl_update_vg_odm_ts(0.001)[122] f_flag='' +epprd_rg:cl_update_vg_odm_ts(0.001)[123] getopts :of option +epprd_rg:cl_update_vg_odm_ts(0.001)[126] : Local timestamps should be good, since volume group was +epprd_rg:cl_update_vg_odm_ts(0.001)[127] : just varied on or off +epprd_rg:cl_update_vg_odm_ts(0.001)[128] o_flag=TRUE +epprd_rg:cl_update_vg_odm_ts(0.001)[123] getopts :of option +epprd_rg:cl_update_vg_odm_ts(0.001)[142] shift 1 +epprd_rg:cl_update_vg_odm_ts(0.001)[144] vg_name=datavg +epprd_rg:cl_update_vg_odm_ts(0.001)[145] [[ -z datavg ]] +epprd_rg:cl_update_vg_odm_ts(0.001)[151] shift +epprd_rg:cl_update_vg_odm_ts(0.001)[152] node_list='' +epprd_rg:cl_update_vg_odm_ts(0.001)[153] /usr/es/sbin/cluster/utilities/cl_get_path all +epprd_rg:cl_update_vg_odm_ts(0.004)[153] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin +epprd_rg:cl_update_vg_odm_ts(0.004)[155] [[ -z '' ]] +epprd_rg:cl_update_vg_odm_ts(0.004)[158] : Check to see if this update is necessary - some LVM levels automatically +epprd_rg:cl_update_vg_odm_ts(0.004)[159] : update volume group timestamps clusterwide.
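cl_update_vg_odm_ts decides whether it has any work to do by probing for four LVM fixes (instfix, then emgr) and comparing the installed bos.rte.lvm level against known thresholds. The comparison trick is the same fixed-width padding cl_deactivate_vgs declared earlier: zero-pad the V, R, M and F fields so the four-part version collapses into one integer. A sketch of that check, wrapped in a hypothetical helper name for self-containment (the 701003046 threshold is lvm_lvl7 from this trace):

    # Fixed-width VRMF: 7.2.5.101 -> 0702005101, comparable as one integer.
    function lvm_syncs_ts_clusterwide {
        typeset -li V R M F VRMF
        typeset -Z2 V R ; typeset -Z3 M F
        lslpp -lcqOr bos.rte.lvm | cut -f3 -d: | IFS=. read V R M F
        VRMF=${V}${R}${M}${F}
        (( VRMF >= 701003046 ))     # true: LVM already syncs timestamps
    }
    lvm_syncs_ts_clusterwide && print 'ODM timestamp update unnecessary'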
+epprd_rg:cl_update_vg_odm_ts(0.004)[163] instfix -iqk IV74100 +epprd_rg:cl_update_vg_odm_ts(0.005)[163] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.012)[164] instfix -iqk IV74883 +epprd_rg:cl_update_vg_odm_ts(0.013)[164] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.020)[165] instfix -iqk IV74698 +epprd_rg:cl_update_vg_odm_ts(0.021)[165] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.028)[166] instfix -iqk IV74246 +epprd_rg:cl_update_vg_odm_ts(0.029)[166] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.036)[174] emgr -l -L IV74883 +epprd_rg:cl_update_vg_odm_ts(0.036)[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.289)[174] emgr -l -L IV74698 +epprd_rg:cl_update_vg_odm_ts(0.290)[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.543)[174] emgr -l -L IV74246 +epprd_rg:cl_update_vg_odm_ts(0.544)[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.796)[183] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_update_vg_odm_ts(0.796)[184] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_update_vg_odm_ts(0.796)[185] : 99.99.999.999 +epprd_rg:cl_update_vg_odm_ts(0.796)[187] typeset -li V R M F +epprd_rg:cl_update_vg_odm_ts(0.796)[188] typeset -Z2 V +epprd_rg:cl_update_vg_odm_ts(0.796)[189] typeset -Z2 R +epprd_rg:cl_update_vg_odm_ts(0.796)[190] typeset -Z3 M +epprd_rg:cl_update_vg_odm_ts(0.796)[191] typeset -Z3 F +epprd_rg:cl_update_vg_odm_ts(0.796)[192] lvm_lvl6=601008015 +epprd_rg:cl_update_vg_odm_ts(0.796)[192] typeset -li lvm_lvl6 +epprd_rg:cl_update_vg_odm_ts(0.797)[194] lvm_lvl7=701003046 +epprd_rg:cl_update_vg_odm_ts(0.797)[194] typeset -li lvm_lvl7 +epprd_rg:cl_update_vg_odm_ts(0.797)[195] VRMF=0 +epprd_rg:cl_update_vg_odm_ts(0.797)[195] typeset -li VRMF +epprd_rg:cl_update_vg_odm_ts(0.797)[198] : Here try and figure out what level of LVM is installed +epprd_rg:cl_update_vg_odm_ts(0.798)[200] lslpp -lcqOr bos.rte.lvm +epprd_rg:cl_update_vg_odm_ts(0.798)[200] cut -f3 -d: +epprd_rg:cl_update_vg_odm_ts(0.800)[200] read V R M F +epprd_rg:cl_update_vg_odm_ts(0.800)[200] IFS=. +epprd_rg:cl_update_vg_odm_ts(0.800)[201] VRMF=0702005101 +epprd_rg:cl_update_vg_odm_ts(0.800)[203] (( 7 == 6 && 702005101 >= 601008015 )) +epprd_rg:cl_update_vg_odm_ts(0.800)[204] (( 702005101 >= 701003046 )) +epprd_rg:cl_update_vg_odm_ts(0.800)[207] : LVM at a level in which timestamp update is unnecessary +epprd_rg:cl_update_vg_odm_ts(0.800)[209] return 0 +epprd_rg:cl_deactivate_vgs(1.126):datavg[vgs_varyoff:291] (( 0 == 0 )) +epprd_rg:cl_deactivate_vgs(1.126):datavg[vgs_varyoff:294] : successful varyoff, set the fence height to read-only +epprd_rg:cl_deactivate_vgs(1.126):datavg[vgs_varyoff:297] cl_set_vg_fence_height -c datavg ro cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=ro(2)) +epprd_rg:cl_deactivate_vgs(1.130):datavg[vgs_varyoff:298] RC=0 +epprd_rg:cl_deactivate_vgs(1.130):datavg[vgs_varyoff:299] (( 0 != 0 )) +epprd_rg:cl_deactivate_vgs(1.130):datavg[vgs_varyoff:403] : Append status to the status file. 
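With the VG now passive, cl_deactivate_vgs lowers the storage fence for this node: cl_set_vg_fence_height reads the VG's uuid from /usr/es/sbin/cluster/etc/vg/datavg.uuid and asks the storage framework (sfwSetFenceGroup) to restrict the node's access to the backing disks to read-only, as the C-level trace above shows. The call shape, as used here ('ro' is the release-side height in this trace; the acquire side would raise it back to read-write):

    # Lower this node's fence height for datavg's disks to read-only.
    cl_set_vg_fence_height -c datavg ro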
+epprd_rg:cl_deactivate_vgs(1.130):datavg[vgs_varyoff:407] echo datavg 0 +epprd_rg:cl_deactivate_vgs(1.130):datavg[vgs_varyoff:407] 1>> /tmp/_deactivate_vgs.tmp +epprd_rg:cl_deactivate_vgs(1.130):datavg[vgs_varyoff:408] return 0 +epprd_rg:cl_deactivate_vgs(1.130):datavg[731] unset PS4_LOOP +epprd_rg:cl_deactivate_vgs(1.130)[736] : Wait for the background instances of vgs_varyoff +epprd_rg:cl_deactivate_vgs(1.130)[738] wait +epprd_rg:cl_deactivate_vgs(1.130)[741] : Collect any failure indications from backgrounded varyoff processing +epprd_rg:cl_deactivate_vgs(1.130)[743] [[ -f /tmp/_deactivate_vgs.tmp ]] +epprd_rg:cl_deactivate_vgs(1.131)[748] cat /tmp/_deactivate_vgs.tmp +epprd_rg:cl_deactivate_vgs(1.131)[748] read VGNAME VARYOFF_STATUS +epprd_rg:cl_deactivate_vgs(1.133)[750] [[ 0 == 1 ]] +epprd_rg:cl_deactivate_vgs(1.133)[748] read VGNAME VARYOFF_STATUS +epprd_rg:cl_deactivate_vgs(1.133)[765] rm -f /tmp/_deactivate_vgs.tmp +epprd_rg:cl_deactivate_vgs(1.136)[769] : Update Resource Manager - release success for the non-error VGs +epprd_rg:cl_deactivate_vgs(1.136)[771] ALLNOERRVGS=All_nonerror_volume_groups +epprd_rg:cl_deactivate_vgs(1.136)[772] [[ true == false ]] +epprd_rg:cl_deactivate_vgs(1.136)[778] cl_RMupdate resource_down All_nonerror_volume_groups cl_deactivate_vgs 2023-09-28T15:56:19.683856 2023-09-28T15:56:19.688416 +epprd_rg:cl_deactivate_vgs(1.164)[782] [[ FALSE == TRUE ]] +epprd_rg:cl_deactivate_vgs(1.164)[791] exit 0 +epprd_rg:process_resources[process_volume_groups:2606] RC=0 +epprd_rg:process_resources[process_volume_groups:2607] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[process_volume_groups:2620] (( 0 != 0 )) +epprd_rg:process_resources[process_volume_groups:2627] return 0 +epprd_rg:process_resources[process_volume_groups_main:2556] STAT=0 +epprd_rg:process_resources[process_volume_groups_main:2559] return 0 +epprd_rg:process_resources[3572] RC=0 +epprd_rg:process_resources[3573] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3575] [[ 0 != 0 ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-09-28T15:56:19.702037 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=SERVICE_LABELS ACTION=RELEASE IP_LABELS='"epprd"' RESOURCE_GROUPS='"epprd_rg' '"' COMMUNICATION_LINKS='""' +epprd_rg:process_resources[1] JOB_TYPE=SERVICE_LABELS +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] IP_LABELS=epprd +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] COMMUNICATION_LINKS='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ SERVICE_LABELS == RELEASE ]] +epprd_rg:process_resources[3360] [[ SERVICE_LABELS == ONLINE ]] +epprd_rg:process_resources[3407] [[ RELEASE == ACQUIRE ]] +epprd_rg:process_resources[3411] release_service_labels 
+epprd_rg:process_resources[release_service_labels:3125] PS4_FUNC=release_service_labels +epprd_rg:process_resources[release_service_labels:3125] typeset PS4_FUNC +epprd_rg:process_resources[release_service_labels:3126] [[ high == high ]] +epprd_rg:process_resources[release_service_labels:3126] set -x +epprd_rg:process_resources[release_service_labels:3127] STAT=0 +epprd_rg:process_resources[release_service_labels:3128] clcallev release_service_addr Sep 28 2023 15:56:19 EVENT START: release_service_addr |2023-09-28T15:56:19|8608|EVENT START: release_service_addr | +epprd_rg:release_service_addr[87] version=1.44 +epprd_rg:release_service_addr[90] STATUS=0 +epprd_rg:release_service_addr[91] PROC_RES=false +epprd_rg:release_service_addr[95] [[ SERVICE_LABELS != 0 ]] +epprd_rg:release_service_addr[95] [[ SERVICE_LABELS != GROUP ]] +epprd_rg:release_service_addr[96] PROC_RES=true +epprd_rg:release_service_addr[97] _IP_LABELS=epprd +epprd_rg:release_service_addr[109] saveNSORDER=UNDEFINED +epprd_rg:release_service_addr[110] NSORDER=local +epprd_rg:release_service_addr[110] export NSORDER +epprd_rg:release_service_addr[117] export GROUPNAME +epprd_rg:release_service_addr[119] [[ true == true ]] +epprd_rg:release_service_addr[120] get_list_head epprd +epprd_rg:release_service_addr[120] read SERVICELABELS +epprd_rg:release_service_addr[121] get_list_tail epprd +epprd_rg:release_service_addr[121] read IP_LABELS +epprd_rg:release_service_addr[127] cl_RMupdate resource_releasing All_service_addrs release_service_addr 2023-09-28T15:56:19.787041 2023-09-28T15:56:19.791517 +epprd_rg:release_service_addr[136] clgetif -a epprd +epprd_rg:release_service_addr[136] LC_ALL=C en0 +epprd_rg:release_service_addr[137] return_code=0 +epprd_rg:release_service_addr[137] typeset -li return_code +epprd_rg:release_service_addr[138] (( 0 )) +epprd_rg:release_service_addr[159] cllsif -J '~' -Sn epprd +epprd_rg:release_service_addr[159] cut -d~ -f7 +epprd_rg:release_service_addr[159] uniq +epprd_rg:release_service_addr[159] textual_addr=61.81.244.156 +epprd_rg:release_service_addr[160] clgetif -a 61.81.244.156 +epprd_rg:release_service_addr[160] LC_ALL=C +epprd_rg:release_service_addr[160] INTERFACE='en0 ' +epprd_rg:release_service_addr[161] [[ -z 'en0 ' ]] +epprd_rg:release_service_addr[182] clgetif -n 61.81.244.156 +epprd_rg:release_service_addr[182] LC_ALL=C +epprd_rg:release_service_addr[182] NETMASK='255.255.255.0 ' +epprd_rg:release_service_addr[183] cllsif -J '~' +epprd_rg:release_service_addr[183] grep -wF 61.81.244.156 +epprd_rg:release_service_addr[184] cut -d~ -f3 +epprd_rg:release_service_addr[184] sort -u +epprd_rg:release_service_addr[183] NETWORK=net_ether_01 +epprd_rg:release_service_addr[189] cllsif -J '~' -Si epprda +epprd_rg:release_service_addr[189] grep '~boot~' +epprd_rg:release_service_addr[190] cut -d~ -f3,7 +epprd_rg:release_service_addr[190] grep ^net_ether_01~ +epprd_rg:release_service_addr[191] cut -d~ -f2 +epprd_rg:release_service_addr[191] tail -1 +epprd_rg:release_service_addr[189] BOOT=61.81.244.134 +epprd_rg:release_service_addr[193] [[ -z 61.81.244.134 ]] +epprd_rg:release_service_addr[214] [[ -n 'en0 ' ]] +epprd_rg:release_service_addr[216] cut -f15 -d~ +epprd_rg:release_service_addr[216] cllsif -J '~' -Sn 61.81.244.156 +epprd_rg:release_service_addr[216] [[ AF_INET == AF_INET6 ]] +epprd_rg:release_service_addr[221] cl_swap_IP_address rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[462] version=1.9.14.8 +epprd_rg:cl_swap_IP_address[464] 
cl_get_path -S +epprd_rg:cl_swap_IP_address[464] OP_SEP='~' +epprd_rg:cl_swap_IP_address[465] LC_ALL=C +epprd_rg:cl_swap_IP_address[465] export LC_ALL +epprd_rg:cl_swap_IP_address[466] RESTORE_ROUTES=/usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[468] cl_echo 33 'Starting execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0' /usr/es/sbin/cluster/events/utils/cl_swap_IP_address 'rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0' Sep 28 2023 15:56:19Starting execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0+epprd_rg:cl_swap_IP_address[470] typeset -i oslevel +epprd_rg:cl_swap_IP_address[471] /usr/bin/sed s/-//g +epprd_rg:cl_swap_IP_address[471] /usr/bin/oslevel -r +epprd_rg:cl_swap_IP_address[471] oslevel=720005 +epprd_rg:cl_swap_IP_address[476] [[ 6 == 6 ]] +epprd_rg:cl_swap_IP_address[477] [[ 6 == 7 ]] +epprd_rg:cl_swap_IP_address[484] no -a +epprd_rg:cl_swap_IP_address[484] grep ipignoreredirects +epprd_rg:cl_swap_IP_address[484] awk '{ print $3 }' +epprd_rg:cl_swap_IP_address[484] PRIOR_IPIGNORE_REDIRECTS_VALUE=0 +epprd_rg:cl_swap_IP_address[485] /usr/sbin/no -o ipignoreredirects=1 Setting ipignoreredirects to 1 +epprd_rg:cl_swap_IP_address[490] PROC_RES=false +epprd_rg:cl_swap_IP_address[491] [[ SERVICE_LABELS != 0 ]] +epprd_rg:cl_swap_IP_address[491] [[ SERVICE_LABELS != GROUP ]] +epprd_rg:cl_swap_IP_address[492] PROC_RES=true +epprd_rg:cl_swap_IP_address[495] set -u +epprd_rg:cl_swap_IP_address[497] RC=0 +epprd_rg:cl_swap_IP_address[504] netstat -in Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll en0 1500 link#2 fa.e6.13.4e.a9.20 2088658616 0 1461165044 0 0 en0 1500 61.81.244 61.81.244.156 2088658616 0 1461165044 0 0 en0 1500 61.81.244 61.81.244.134 2088658616 0 1461165044 0 0 lo0 16896 link#1 1996315323 0 1996315323 0 0 lo0 16896 127 127.0.0.1 1996315323 0 1996315323 0 0 lo0 16896 ::1%1 1996315323 0 1996315323 0 0 +epprd_rg:cl_swap_IP_address[505] netstat -rnC Routing tables Destination Gateway Flags Wt Policy If Cost Config_Cost Route tree for Protocol Family 2 (Internet): default 61.81.244.1 UG 1 - en0 0 0 61.81.244.0 61.81.244.156 UHSb 1 - en0 0 0 => 61.81.244/24 61.81.244.156 U 1 - en0 0 0 61.81.244.134 127.0.0.1 UGHS 1 - lo0 0 0 61.81.244.156 127.0.0.1 UGHS 1 - lo0 0 0 61.81.244.255 61.81.244.156 UHSb 1 - en0 0 0 127/8 127.0.0.1 U 1 - lo0 0 0 Route tree for Protocol Family 24 (Internet v6): ::1%1 ::1%1 UH 1 - lo0 0 0 +epprd_rg:cl_swap_IP_address[506] CASC_OR_ROT=rotating +epprd_rg:cl_swap_IP_address[507] ACQ_OR_RLSE=release +epprd_rg:cl_swap_IP_address[508] IF=en0 +epprd_rg:cl_swap_IP_address[509] ADDR=61.81.244.134 +epprd_rg:cl_swap_IP_address[510] OLD_ADDR=61.81.244.156 +epprd_rg:cl_swap_IP_address[511] NETMASK=255.255.255.0 +epprd_rg:cl_swap_IP_address[514] [[ rotating == cascading ]] +epprd_rg:cl_swap_IP_address[525] cut -f3 -d~ +epprd_rg:cl_swap_IP_address[525] cllsif -J '~' -Sw -n 61.81.244.134 +epprd_rg:cl_swap_IP_address[525] NET=net_ether_01 +epprd_rg:cl_swap_IP_address[528] clodmget -qidentifier=61.81.244.134 -f max_aliases -n HACMPadapter +epprd_rg:cl_swap_IP_address[528] ALIAS_FIRST=0 +epprd_rg:cl_swap_IP_address[529] grep -c -w inet +epprd_rg:cl_swap_IP_address[529] ifconfig en0 +epprd_rg:cl_swap_IP_address[529] LC_ALL=C +epprd_rg:cl_swap_IP_address[529] NUM_ADDRS=2 +epprd_rg:cl_swap_IP_address[530] [[ release == acquire ]] 
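Two preliminaries are visible before any address is touched. First, release_service_addr derived every argument that cl_swap_IP_address was just called with from the cluster topology (cllsif) and the live adapters (clgetif). Second, cl_swap_IP_address saves the ipignoreredirects tunable so the original value can be restored on exit (the restore appears near the end of the event at line [989]). A condensed sketch using only commands seen above:

    textual_addr=$(cllsif -J '~' -Sn epprd | cut -d~ -f7 | uniq)    # 61.81.244.156
    INTERFACE=$(LC_ALL=C clgetif -a $textual_addr)                  # en0
    NETMASK=$(LC_ALL=C clgetif -n $textual_addr)                    # 255.255.255.0
    BOOT=$(cllsif -J '~' -Si epprda | grep '~boot~' | cut -d~ -f3,7 |
           grep ^net_ether_01~ | cut -d~ -f2 | tail -1)             # 61.81.244.134
    cl_swap_IP_address rotating release $INTERFACE $BOOT $textual_addr $NETMASK

    PRIOR=$(no -a | grep ipignoreredirects | awk '{ print $3 }')    # 0 on this node
    /usr/sbin/no -o ipignoreredirects=1                             # forced on for the swap
    # ... alias and route manipulation ...
    no -o ipignoreredirects=$PRIOR                                  # restored on the way out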
+epprd_rg:cl_swap_IP_address[598] cl_echo 7320 'cl_swap_IP_address: Removing aliased IP address 61.81.244.156 from adapter en0' cl_swap_IP_address 61.81.244.156 en0
Sep 28 2023 15:56:19cl_swap_IP_address: Removing aliased IP address 61.81.244.156 from adapter en0
+epprd_rg:cl_swap_IP_address[600] amlog_trace '' 'Deliasing Service IP|61.81.244.156'
+epprd_rg:cl_swap_IP_address[amlog_trace:318] clcycle clavailability.log
+epprd_rg:cl_swap_IP_address[amlog_trace:318] 1> /dev/null 2>& 1
+epprd_rg:cl_swap_IP_address[amlog_trace:319] cltime
+epprd_rg:cl_swap_IP_address[amlog_trace:319] DATE=2023-09-28T15:56:20.034871
+epprd_rg:cl_swap_IP_address[amlog_trace:320] echo '|2023-09-28T15:56:20.034871|INFO: Deliasing Service IP|61.81.244.156'
+epprd_rg:cl_swap_IP_address[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
+epprd_rg:cl_swap_IP_address[601] PERSISTENT=''
+epprd_rg:cl_swap_IP_address[602] ADDR1=61.81.244.156
+epprd_rg:cl_swap_IP_address[603] disable_pmtu_gated
Setting tcp_pmtu_discover to 0
Setting udp_pmtu_discover to 0
+epprd_rg:cl_swap_IP_address[604] alias_replace_routes /usr/es/sbin/cluster/.restore_routes en0
+epprd_rg:cl_swap_IP_address[alias_replace_routes:168] RR=/usr/es/sbin/cluster/.restore_routes
+epprd_rg:cl_swap_IP_address[alias_replace_routes:169] shift
+epprd_rg:cl_swap_IP_address[alias_replace_routes:170] interfaces=en0
+epprd_rg:cl_swap_IP_address[alias_replace_routes:171] RC=0
+epprd_rg:cl_swap_IP_address[alias_replace_routes:173] cp /dev/null /usr/es/sbin/cluster/.restore_routes
+epprd_rg:cl_swap_IP_address[alias_replace_routes:175] cat
+epprd_rg:cl_swap_IP_address[alias_replace_routes:175] 1> /usr/es/sbin/cluster/.restore_routes 0<< \EOF
+epprd_rg:cl_swap_IP_address[alias_replace_routes:175] date
#!/bin/ksh
#
# Script created by cl_swap_IP_address on Thu Sep 28 15:56:20 KORST 2023
#
PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin
PS4='${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] '
export VERBOSE_LOGGING=${VERBOSE_LOGGING:-"high"}
[[ "$VERBOSE_LOGGING" = "high" ]] && set -x
: Starting $0 at $(date)
#
EOF
+epprd_rg:cl_swap_IP_address[alias_replace_routes:189] awk '$3 !~ "[Ll]ink" && $3 !~ ":" && $3 !~ "Network" {print $4}'
+epprd_rg:cl_swap_IP_address[alias_replace_routes:189] netstat -in
+epprd_rg:cl_swap_IP_address[alias_replace_routes:189] LOCADDRS=$'61.81.244.156\n61.81.244.134\n127.0.0.1'
+epprd_rg:cl_swap_IP_address[alias_replace_routes:191] netstat -rnC
Routing tables
Destination        Gateway          Flags  Wt  Policy  If   Cost  Config_Cost
Route tree for Protocol Family 2 (Internet):
default            61.81.244.1      UG     1   -       en0  0     0
61.81.244.0        61.81.244.156    UHSb   1   -       en0  0     0  =>
61.81.244/24       61.81.244.156    U      1   -       en0  0     0
61.81.244.134      127.0.0.1        UGHS   1   -       lo0  0     0
61.81.244.156      127.0.0.1        UGHS   1   -       lo0  0     0
61.81.244.255      61.81.244.156    UHSb   1   -       en0  0     0
127/8              127.0.0.1        U      1   -       lo0  0     0
Route tree for Protocol Family 24 (Internet v6):
::1%1              ::1%1            UH     1   -       lo0  0     0
+epprd_rg:cl_swap_IP_address[alias_replace_routes:200] I=1
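Note how alias_replace_routes builds its own undo script: /usr/es/sbin/cluster/.restore_routes starts as an empty file with the standard ksh prologue shown above, and every route about to be disturbed has its restore command appended before the change is made. A reduced sketch of the mechanism (prologue abbreviated; the full PATH and PS4 are in the generated script above):

    RR=/usr/es/sbin/cluster/.restore_routes
    cp /dev/null $RR                          # start from an empty file
    cat > $RR << \EOF
    #!/bin/ksh
    : Starting $0 at $(date)
    EOF
    # later, one restore command per route change, e.g.:
    print 'cl_route_change default 127.0.0.1 61.81.244.1 inet' >> $RR
    echo 'exit $cl_route_change_RC' >> $RR    # propagate any failure
    chmod +x $RR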
+epprd_rg:cl_swap_IP_address[alias_replace_routes:200] typeset -li I +epprd_rg:cl_swap_IP_address[alias_replace_routes:201] NXTSVC='' +epprd_rg:cl_swap_IP_address[alias_replace_routes:203] awk '$3 !~ "[Ll]ink" && $3 !~ ":" && ($1 == "en0" || $1 == "en0*") {print $4}' +epprd_rg:cl_swap_IP_address[alias_replace_routes:203] netstat -in +epprd_rg:cl_swap_IP_address[alias_replace_routes:203] IFADDRS=$'61.81.244.156\n61.81.244.134' +epprd_rg:cl_swap_IP_address[alias_replace_routes:204] cllsif -J '~' -Spi epprda +epprd_rg:cl_swap_IP_address[alias_replace_routes:204] grep '~net_ether_01~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:205] grep -E '~service~|~persistent~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:205] cut -d~ -f7 +epprd_rg:cl_swap_IP_address[alias_replace_routes:205] sort -u +epprd_rg:cl_swap_IP_address[alias_replace_routes:204] SVCADDRS=61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:210] echo 61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:210] awk '$1 !~ ":" {print $1}' +epprd_rg:cl_swap_IP_address[alias_replace_routes:210] SVCADDRS=61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:212] cllsif -J '~' -Spi epprda +epprd_rg:cl_swap_IP_address[alias_replace_routes:212] grep '~net_ether_01~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:213] grep -E '~persistent~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:214] cut -d~ -f7 +epprd_rg:cl_swap_IP_address[alias_replace_routes:212] PERSISTENT_IP='' +epprd_rg:cl_swap_IP_address[alias_replace_routes:215] routeaddr='' +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:223] [[ -z '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:225] routeaddr=61.81.244.156 +epprd_rg:cl_swap_IP_address[alias_replace_routes:227] [[ 61.81.244.156 != 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:234] [[ -n '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:221] [[ 61.81.244.134 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:234] [[ -n '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:243] NXTADDR='' +epprd_rg:cl_swap_IP_address[alias_replace_routes:244] bootaddr='' +epprd_rg:cl_swap_IP_address[alias_replace_routes:245] [[ -z '' ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:247] cllsif -J '~' -Spi epprda +epprd_rg:cl_swap_IP_address[alias_replace_routes:247] grep 
'~net_ether_01~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:247] grep '~boot~' +epprd_rg:cl_swap_IP_address[alias_replace_routes:247] awk -F~ '$9 == "en0" { print $7; }' +epprd_rg:cl_swap_IP_address[alias_replace_routes:247] bootaddr=61.81.244.134 +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] [[ 61.81.244.156 == 61.81.244.134 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] [[ 61.81.244.134 == 61.81.244.134 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] clgetnet 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] clgetnet 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] clgetnet 61.81.244.134 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:250] [[ 61.81.244.134 != 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:252] NXTADDR=61.81.244.134 +epprd_rg:cl_swap_IP_address[alias_replace_routes:253] break +epprd_rg:cl_swap_IP_address[alias_replace_routes:258] swaproute=0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:259] NETSTAT_FLAGS='-nrf inet' +epprd_rg:cl_swap_IP_address[alias_replace_routes:261] [[ 61.81.244.156 == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:264] swaproute=1 +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] netstat -nrf inet +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] fgrep -w en0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:267] read DEST GW FLAGS OTHER +epprd_rg:cl_swap_IP_address[alias_replace_routes:268] LOOPBACK=127.0.0.1 +epprd_rg:cl_swap_IP_address[alias_replace_routes:336] clgetnet 61.81.244.156 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:336] clgetnet 61.81.244.1 255.255.255.0 +epprd_rg:cl_swap_IP_address[alias_replace_routes:336] [[ 61.81.244.0 == 61.81.244.0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:338] [[ 0 == 0 ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:341] [[ -z release ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:341] [[ 61.81.244.156 == ]] +epprd_rg:cl_swap_IP_address[alias_replace_routes:346] print 'cl_route_change default 127.0.0.1 61.81.244.1 inet' +epprd_rg:cl_swap_IP_address[alias_replace_routes:346] 1>> /usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[alias_replace_routes:347] add_rc_check /usr/es/sbin/cluster/.restore_routes cl_route_change +epprd_rg:cl_swap_IP_address[add_rc_check:70] RR=/usr/es/sbin/cluster/.restore_routes +epprd_rg:cl_swap_IP_address[add_rc_check:71] FUNC=cl_route_change +epprd_rg:cl_swap_IP_address[add_rc_check:73] cat +epprd_rg:cl_swap_IP_address[add_rc_check:73] 1>> /usr/es/sbin/cluster/.restore_routes 0<< \EOF rc=$? 
if [[ $rc != 0 ]]
then
    echo "ERROR: cl_route_change failed with code $rc"
    cl_route_change_RC=$rc
fi
EOF
+epprd_rg:cl_swap_IP_address[alias_replace_routes:350] cl_route_change default 61.81.244.1 127.0.0.1 inet
+epprd_rg:cl_swap_IP_address[alias_replace_routes:351] RC=0
+epprd_rg:cl_swap_IP_address[alias_replace_routes:352] : cl_route_change completed with 0
+epprd_rg:cl_swap_IP_address[alias_replace_routes:353] I=I+1
+epprd_rg:cl_swap_IP_address[alias_replace_routes:267] read DEST GW FLAGS OTHER
+epprd_rg:cl_swap_IP_address[alias_replace_routes:268] LOOPBACK=127.0.0.2
+epprd_rg:cl_swap_IP_address[alias_replace_routes:290] [[ 61.81.244.156 == 61.81.244.156 ]]
+epprd_rg:cl_swap_IP_address[alias_replace_routes:292] [[ '' != '' ]]
+epprd_rg:cl_swap_IP_address[alias_replace_routes:292] [[ 61.81.244.156 == 61.81.244.156 ]]
+epprd_rg:cl_swap_IP_address[alias_replace_routes:294] route delete -host 61.81.244.0 61.81.244.156
61.81.244.156 host 61.81.244.0: gateway 61.81.244.156
+epprd_rg:cl_swap_IP_address[alias_replace_routes:267] read DEST GW FLAGS OTHER
+epprd_rg:cl_swap_IP_address[alias_replace_routes:268] LOOPBACK=127.0.0.2
+epprd_rg:cl_swap_IP_address[alias_replace_routes:272] clgetnet 61.81.244.156 255.255.255.0
+epprd_rg:cl_swap_IP_address[alias_replace_routes:272] clgetnet 61.81.244.156 255.255.255.0
+epprd_rg:cl_swap_IP_address[alias_replace_routes:272] [[ 61.81.244.0 == 61.81.244.0 ]]
+epprd_rg:cl_swap_IP_address[alias_replace_routes:274] [[ 61.81.244.156 == 61.81.244.156 ]]
+epprd_rg:cl_swap_IP_address[alias_replace_routes:276] [[ '' != '' ]]
+epprd_rg:cl_swap_IP_address[alias_replace_routes:276] [[ 61.81.244.156 == 61.81.244.156 ]]
+epprd_rg:cl_swap_IP_address[alias_replace_routes:279] route delete -net 61.81.244/24 61.81.244.156
61.81.244.156 net 61.81.244: gateway 61.81.244.156
+epprd_rg:cl_swap_IP_address[alias_replace_routes:267] read DEST GW FLAGS OTHER
+epprd_rg:cl_swap_IP_address[alias_replace_routes:268] LOOPBACK=127.0.0.2
+epprd_rg:cl_swap_IP_address[alias_replace_routes:290] [[ 61.81.244.156 == 61.81.244.156 ]]
+epprd_rg:cl_swap_IP_address[alias_replace_routes:292] [[ '' != '' ]]
+epprd_rg:cl_swap_IP_address[alias_replace_routes:292] [[ 61.81.244.156 == 61.81.244.156 ]]
+epprd_rg:cl_swap_IP_address[alias_replace_routes:294] route delete -host 61.81.244.255 61.81.244.156
61.81.244.156 host 61.81.244.255: gateway 61.81.244.156
+epprd_rg:cl_swap_IP_address[alias_replace_routes:267] read DEST GW FLAGS OTHER
+epprd_rg:cl_swap_IP_address[alias_replace_routes:360] echo 'exit $cl_route_change_RC'
+epprd_rg:cl_swap_IP_address[alias_replace_routes:360] 1>> /usr/es/sbin/cluster/.restore_routes
+epprd_rg:cl_swap_IP_address[alias_replace_routes:361] chmod +x /usr/es/sbin/cluster/.restore_routes
+epprd_rg:cl_swap_IP_address[alias_replace_routes:362] return 0
+epprd_rg:cl_swap_IP_address[605] RC=0
+epprd_rg:cl_swap_IP_address[606] : alias_replace_routes completed with 0
+epprd_rg:cl_swap_IP_address[609] clifconfig en0 delete 61.81.244.156
+epprd_rg:clifconfig[117] version=1.9
+epprd_rg:clifconfig[121] set -A args en0 delete 61.81.244.156
+epprd_rg:clifconfig[124] interface=en0
+epprd_rg:clifconfig[125] shift
+epprd_rg:clifconfig[127] [[ -n delete ]]
+epprd_rg:clifconfig[130] delete_val=1
+epprd_rg:clifconfig[167] shift
+epprd_rg:clifconfig[127] [[ -n 61.81.244.156 ]]
+epprd_rg:clifconfig[147] params=' address=61.81.244.156'
+epprd_rg:clifconfig[147] addr=61.81.244.156
+epprd_rg:clifconfig[167] shift
+epprd_rg:clifconfig[127] [[ -n '' ]]
+epprd_rg:clifconfig[174] [[ -n 1 ]]
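The net effect of the route surgery above, in order: the default route is parked on the loopback for the duration of the swap (its inverse was already queued in the undo script, wrapped in the rc-checking stanza from add_rc_check), and the interface routes tied to the outgoing service address are deleted. Condensed:

    cl_route_change default 61.81.244.1 127.0.0.1 inet    # executed now: gw -> lo0
    route delete -net 61.81.244/24 61.81.244.156          # plus the two -host routes
    # queued in .restore_routes, run once the boot alias is back up:
    cl_route_change default 127.0.0.1 61.81.244.1 inet
    exit $cl_route_change_RC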
+epprd_rg:clifconfig[174] [[ -n epprd_rg ]] +epprd_rg:clifconfig[175] clwparname epprd_rg +epprd_rg:clwparname[38] version=1.3.1.1 +epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clwparname[44] [[ -z '' ]] +epprd_rg:clwparname[44] exit 0 +epprd_rg:clifconfig[175] WPARNAME='' +epprd_rg:clifconfig[176] (( 0 == 0 )) +epprd_rg:clifconfig[176] [[ -n '' ]] +epprd_rg:clifconfig[218] belongs_to_an_active_wpar 61.81.244.156 +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]] +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1 +epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast +epprd_rg:clifconfig[218] IFS='~' +epprd_rg:clifconfig[219] rc=1 +epprd_rg:clifconfig[221] [[ 1 == 0 ]] +epprd_rg:clifconfig[275] ifconfig en0 delete 61.81.244.156 +epprd_rg:cl_swap_IP_address[611] [[ 1 == 1 ]] +epprd_rg:cl_swap_IP_address[613] [[ -n '' ]] +epprd_rg:cl_swap_IP_address[662] [[ -n 61.81.244.134 ]] +epprd_rg:cl_swap_IP_address[671] (( 720005 <= 710003 )) +epprd_rg:cl_swap_IP_address[675] clifconfig en0 alias 61.81.244.134 netmask 255.255.255.0 +epprd_rg:clifconfig[117] version=1.9 +epprd_rg:clifconfig[121] set -A args en0 alias 61.81.244.134 netmask 255.255.255.0 +epprd_rg:clifconfig[124] interface=en0 +epprd_rg:clifconfig[125] shift +epprd_rg:clifconfig[127] [[ -n alias ]] +epprd_rg:clifconfig[129] alias_val=1 +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n 61.81.244.134 ]] +epprd_rg:clifconfig[147] params=' address=61.81.244.134' +epprd_rg:clifconfig[147] addr=61.81.244.134 +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n netmask ]] +epprd_rg:clifconfig[149] params=' address=61.81.244.134 netmask=255.255.255.0' +epprd_rg:clifconfig[149] shift +epprd_rg:clifconfig[167] shift +epprd_rg:clifconfig[127] [[ -n '' ]] +epprd_rg:clifconfig[174] [[ -n 1 ]] +epprd_rg:clifconfig[174] [[ -n epprd_rg ]] +epprd_rg:clifconfig[175] clwparname epprd_rg +epprd_rg:clwparname[38] version=1.3.1.1 +epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clwparname[44] [[ -z '' ]] +epprd_rg:clwparname[44] exit 0 +epprd_rg:clifconfig[175] WPARNAME='' +epprd_rg:clifconfig[176] (( 0 == 0 )) +epprd_rg:clifconfig[176] [[ -n '' ]] +epprd_rg:clifconfig[218] belongs_to_an_active_wpar 61.81.244.134 +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]] +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1 +epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast +epprd_rg:clifconfig[218] IFS='~' +epprd_rg:clifconfig[219] rc=1 +epprd_rg:clifconfig[221] [[ 1 == 0 ]] +epprd_rg:clifconfig[275] ifconfig en0 alias 61.81.244.134 netmask 255.255.255.0 +epprd_rg:cl_swap_IP_address[679] /usr/es/sbin/cluster/.restore_routes +epprd_rg:.restore_routes[+9] date +epprd_rg:.restore_routes[+9] : Starting /usr/es/sbin/cluster/.restore_routes at Thu Sep 28 15:56:20 KORST 2023 +epprd_rg:.restore_routes[+11] cl_route_change default 127.0.0.1 61.81.244.1 inet +epprd_rg:.restore_routes[+12] rc=0 +epprd_rg:.restore_routes[+13] [[ 0 != 0 ]] +epprd_rg:.restore_routes[+19] exit +epprd_rg:cl_swap_IP_address[680] [[ 0 == 0 ]] +epprd_rg:cl_swap_IP_address[680] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[681] : Completed /usr/es/sbin/cluster/.restore_routes with return code 0 
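Stripped of the WPAR checks (no WPAR is configured here, so clifconfig falls through to the stock ifconfig both times), the address swap itself reduces to three operations:

    ifconfig en0 delete 61.81.244.156                         # drop the service alias
    ifconfig en0 alias 61.81.244.134 netmask 255.255.255.0    # restore the boot alias
    /usr/es/sbin/cluster/.restore_routes                      # put the routes back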
+epprd_rg:cl_swap_IP_address[682] enable_pmtu_gated Setting tcp_pmtu_discover to 1 Setting udp_pmtu_discover to 1 +epprd_rg:cl_swap_IP_address[685] hats_adapter_notify en0 -d 61.81.244.156 alias 2023-09-28T15:56:20.272898 hats_adapter_notify 2023-09-28T15:56:20.273896 hats_adapter_notify +epprd_rg:cl_swap_IP_address[688] check_alias_status en0 61.81.244.156 release +epprd_rg:cl_swap_IP_address[check_alias_status:108] CH_INTERFACE=en0 +epprd_rg:cl_swap_IP_address[check_alias_status:109] CH_ADDRESS=61.81.244.156 +epprd_rg:cl_swap_IP_address[check_alias_status:110] CH_ACQ_OR_RLSE=release +epprd_rg:cl_swap_IP_address[check_alias_status:118] IF_IB=en0 +epprd_rg:cl_swap_IP_address[check_alias_status:120] awk '{print index($0, "ib")}' +epprd_rg:cl_swap_IP_address[check_alias_status:120] echo en0 +epprd_rg:cl_swap_IP_address[check_alias_status:120] IS_IB=0 +epprd_rg:cl_swap_IP_address[check_alias_status:122] [[ 0 != 1 ]] +epprd_rg:cl_swap_IP_address[check_alias_status:124] clifconfig en0 +epprd_rg:cl_swap_IP_address[check_alias_status:124] fgrep -w 61.81.244.156 +epprd_rg:cl_swap_IP_address[check_alias_status:124] awk '{print $2}' +epprd_rg:clifconfig[117] version=1.9 +epprd_rg:clifconfig[121] set -A args en0 +epprd_rg:clifconfig[124] interface=en0 +epprd_rg:clifconfig[125] shift +epprd_rg:clifconfig[127] [[ -n '' ]] +epprd_rg:clifconfig[174] [[ -n '' ]] +epprd_rg:clifconfig[218] belongs_to_an_active_wpar +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]] +epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1 +epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast +epprd_rg:clifconfig[218] IFS='~' +epprd_rg:clifconfig[219] rc=1 +epprd_rg:clifconfig[221] [[ 1 == 0 ]] +epprd_rg:clifconfig[275] ifconfig en0 +epprd_rg:cl_swap_IP_address[check_alias_status:124] ADDR='' +epprd_rg:cl_swap_IP_address[check_alias_status:129] [ release = acquire ] +epprd_rg:cl_swap_IP_address[check_alias_status:139] [[ '' == 61.81.244.156 ]] +epprd_rg:cl_swap_IP_address[check_alias_status:144] return 0 +epprd_rg:cl_swap_IP_address[689] RC1=0 +epprd_rg:cl_swap_IP_address[690] [[ 0 == 0 ]] +epprd_rg:cl_swap_IP_address[690] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[693] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[697] amlog_trace '' 'Deliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_swap_IP_address[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_swap_IP_address[amlog_trace:319] cltime +epprd_rg:cl_swap_IP_address[amlog_trace:319] DATE=2023-09-28T15:56:20.328275 +epprd_rg:cl_swap_IP_address[amlog_trace:320] echo '|2023-09-28T15:56:20.328275|INFO: Deliasing Service IP|61.81.244.156' +epprd_rg:cl_swap_IP_address[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_swap_IP_address[701] [[ 0 != 0 ]] +epprd_rg:cl_swap_IP_address[714] flush_arp +epprd_rg:cl_swap_IP_address[flush_arp:49] arp -an +epprd_rg:cl_swap_IP_address[flush_arp:49] grep '\?' 
+epprd_rg:cl_swap_IP_address[flush_arp:49] tr -d '()'
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.224
61.81.244.224 (61.81.244.224) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.239
61.81.244.239 (61.81.244.239) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.251
61.81.244.251 (61.81.244.251) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.252
61.81.244.252 (61.81.244.252) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.123
61.81.244.123 (61.81.244.123) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.126
61.81.244.126 (61.81.244.126) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.145
61.81.244.145 (61.81.244.145) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.146
61.81.244.146 (61.81.244.146) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.1
61.81.244.1 (61.81.244.1) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.154
61.81.244.154 (61.81.244.154) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:52] return 0
+epprd_rg:cl_swap_IP_address[716] netstat -in
Name  Mtu    Network      Address             Ipkts       Ierrs  Opkts       Oerrs  Coll
en0   1500   link#2       fa.e6.13.4e.a9.20   2088658636  0      1461165050  0      0
en0   1500   61.81.244    61.81.244.134       2088658636  0      1461165050  0      0
lo0   16896  link#1                           1996315327  0      1996315327  0      0
lo0   16896  127          127.0.0.1           1996315327  0      1996315327  0      0
lo0   16896  ::1%1                            1996315327  0      1996315327  0      0
+epprd_rg:cl_swap_IP_address[717] netstat -rnC
Routing tables
Destination        Gateway          Flags  Wt  Policy  If   Cost  Config_Cost
Route tree for Protocol Family 2 (Internet):
default            61.81.244.1      UG     1   -       en0  0     0
61.81.244.0        61.81.244.134    UHSb   1   -       en0  0     0  =>
61.81.244/24       61.81.244.134    U      1   -       en0  0     0
61.81.244.134      127.0.0.1        UGHS   1   -       lo0  0     0
61.81.244.255      61.81.244.134    UHSb   1   -       en0  0     0
127/8              127.0.0.1        U      1   -       lo0  0     0
Route tree for Protocol Family 24 (Internet v6):
::1%1              ::1%1            UH     1   -       lo0  0     0
+epprd_rg:cl_swap_IP_address[989] no -o ipignoreredirects=0
Setting ipignoreredirects to 0
+epprd_rg:cl_swap_IP_address[992] cl_echo 32 'Completed execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0. Exit status = 0' /usr/es/sbin/cluster/events/utils/cl_swap_IP_address 'rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0' 0
Sep 28 2023 15:56:20Completed execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating release en0 61.81.244.134 61.81.244.156 255.255.255.0.
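After RSCT is notified (hats_adapter_notify), the release is verified and the ARP cache is flushed so that peers re-resolve the moved address. A sketch of the two helpers as they behave in this trace; on release, the grep for the old address must come back empty:

    ADDR=$(clifconfig en0 | fgrep -w 61.81.244.156 | awk '{print $2}')
    [[ -z $ADDR ]] || return 1              # still configured: the release failed

    arp -an | grep '\?' | tr -d '()' |
    while read host addr other
    do
        arp -d $addr                        # delete every dynamic ARP entry
    done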
Exit status = 0+epprd_rg:cl_swap_IP_address[994] date Thu Sep 28 15:56:20 KORST 2023 +epprd_rg:cl_swap_IP_address[996] exit 0 +epprd_rg:release_service_addr[225] RC=0 +epprd_rg:release_service_addr[227] [[ 0 != 0 ]] +epprd_rg:release_service_addr[245] cl_RMupdate resource_down All_nonerror_service_addrs release_service_addr 2023-09-28T15:56:20.411625 2023-09-28T15:56:20.416067 +epprd_rg:release_service_addr[249] [[ UNDEFINED != UNDEFINED ]] +epprd_rg:release_service_addr[252] NSORDER='' +epprd_rg:release_service_addr[252] export NSORDER +epprd_rg:release_service_addr[255] exit 0 Sep 28 2023 15:56:20 EVENT COMPLETED: release_service_addr 0 |2023-09-28T15:56:20|8608|EVENT COMPLETED: release_service_addr 0| +epprd_rg:process_resources[release_service_labels:3129] RC=0 +epprd_rg:process_resources[release_service_labels:3131] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[release_service_labels:3146] (( 0 != 0 )) +epprd_rg:process_resources[release_service_labels:3152] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. +epprd_rg:process_resources[release_service_labels:3154] return 0 +epprd_rg:process_resources[3412] RC=0 +epprd_rg:process_resources[3413] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-09-28T15:56:22.656099 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=WPAR ACTION=RELEASE RESOURCE_GROUPS='"epprd_rg' '"' +epprd_rg:process_resources[1] JOB_TYPE=WPAR +epprd_rg:process_resources[1] ACTION=RELEASE +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ WPAR == RELEASE ]] +epprd_rg:process_resources[3360] [[ WPAR == ONLINE ]] +epprd_rg:process_resources[3492] process_wpars RELEASE +epprd_rg:process_resources[process_wpars:3265] PS4_FUNC=process_wpars +epprd_rg:process_resources[process_wpars:3265] typeset PS4_FUNC +epprd_rg:process_resources[process_wpars:3266] [[ high == high ]] +epprd_rg:process_resources[process_wpars:3266] set -x +epprd_rg:process_resources[process_wpars:3267] STAT=0 +epprd_rg:process_resources[process_wpars:3268] action=RELEASE +epprd_rg:process_resources[process_wpars:3268] typeset action +epprd_rg:process_resources[process_wpars:3272] export GROUPNAME +epprd_rg:process_resources[process_wpars:3280] clstop_wpar +epprd_rg:clstop_wpar[42] version=1.7 +epprd_rg:clstop_wpar[46] [[ rg_move == reconfig_resource_release ]] +epprd_rg:clstop_wpar[46] [[ RELEASE_PRIMARY == reconfig_resource_release ]] +epprd_rg:clstop_wpar[55] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clstop_wpar[55] [[ -z '' ]] +epprd_rg:clstop_wpar[55] exit 0 +epprd_rg:process_resources[process_wpars:3281] RC=0 +epprd_rg:process_resources[process_wpars:3285] (( 0 != 0 && 0 != 11 )) 
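The WPAR job that follows is a no-op on this cluster: clstop_wpar (like clwparname during the swap above) exits as soon as it sees that no resource group carries a WPAR_NAME resource. The guard is a single ODM lookup:

    [[ -z $(clodmget '-qname = WPAR_NAME' -f group -n HACMPresource) ]] && exit 0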
+epprd_rg:process_resources[process_wpars:3294] return 0 +epprd_rg:process_resources[3493] RC=0 +epprd_rg:process_resources[3495] [[ RELEASE == RELEASE ]] +epprd_rg:process_resources[3497] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-09-28T15:56:22.689324 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=OFFLINE RESOURCE_GROUPS='"epprd_rg"' +epprd_rg:process_resources[1] JOB_TYPE=OFFLINE +epprd_rg:process_resources[1] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ OFFLINE == RELEASE ]] +epprd_rg:process_resources[3360] [[ OFFLINE == ONLINE ]] +epprd_rg:process_resources[3681] set_resource_group_state DOWN +epprd_rg:process_resources[set_resource_group_state:82] PS4_FUNC=set_resource_group_state +epprd_rg:process_resources[set_resource_group_state:82] typeset PS4_FUNC +epprd_rg:process_resources[set_resource_group_state:83] [[ high == high ]] +epprd_rg:process_resources[set_resource_group_state:83] set -x +epprd_rg:process_resources[set_resource_group_state:84] STAT=0 +epprd_rg:process_resources[set_resource_group_state:85] new_status=DOWN +epprd_rg:process_resources[set_resource_group_state:89] export GROUPNAME +epprd_rg:process_resources[set_resource_group_state:90] [[ DOWN != DOWN ]] +epprd_rg:process_resources[set_resource_group_state:100] : Resource Manager Updates +epprd_rg:process_resources[set_resource_group_state:122] cl_RMupdate rg_down epprd_rg process_resources 2023-09-28T15:56:22.724273 2023-09-28T15:56:22.734590 +epprd_rg:process_resources[set_resource_group_state:124] amlog_trace '' 'acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:318] clcycle clavailability.log +epprd_rg:process_resources[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:process_resources[amlog_trace:319] cltime +epprd_rg:process_resources[amlog_trace:319] DATE=2023-09-28T15:56:22.773613 +epprd_rg:process_resources[amlog_trace:320] echo '|2023-09-28T15:56:22.773613|INFO: acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:process_resources[set_resource_group_state:153] return 0 +epprd_rg:process_resources[3682] RC=0 +epprd_rg:process_resources[3683] postvg_for_rdisk +epprd_rg:process_resources[postvg_for_rdisk:856] PS4_FUNC=postvg_for_rdisk +epprd_rg:process_resources[postvg_for_rdisk:856] typeset PS4_FUNC +epprd_rg:process_resources[postvg_for_rdisk:857] [[ high == high ]] +epprd_rg:process_resources[postvg_for_rdisk:857] set -x +epprd_rg:process_resources[postvg_for_rdisk:858] STAT=0 +epprd_rg:process_resources[postvg_for_rdisk:859] FAILURE_IN_METHOD=0 +epprd_rg:process_resources[postvg_for_rdisk:859] typeset -li FAILURE_IN_METHOD +epprd_rg:process_resources[postvg_for_rdisk:860] 
LIST_OF_FAILED_RGS='' +epprd_rg:process_resources[postvg_for_rdisk:861] RG_LIST=epprd_rg +epprd_rg:process_resources[postvg_for_rdisk:862] RDISK_LIST='' +epprd_rg:process_resources[postvg_for_rdisk:863] DISK_LIST='' +epprd_rg:process_resources[postvg_for_rdisk:866] : Resource groups are processed individually. This is required because +epprd_rg:process_resources[postvg_for_rdisk:867] : the replication mechanism may differ between resource groups. +epprd_rg:process_resources[postvg_for_rdisk:871] getReplicatedResources epprd_rg +epprd_rg:process_resources[getReplicatedResources:699] PS4_FUNC=getReplicatedResources +epprd_rg:process_resources[getReplicatedResources:699] typeset PS4_FUNC +epprd_rg:process_resources[getReplicatedResources:700] [[ high == high ]] +epprd_rg:process_resources[getReplicatedResources:700] set -x +epprd_rg:process_resources[getReplicatedResources:702] RV=false +epprd_rg:process_resources[getReplicatedResources:704] clodmget -n -f type HACMPrresmethods +epprd_rg:process_resources[getReplicatedResources:704] [[ -n 9 ]] +epprd_rg:process_resources[getReplicatedResources:707] : Replicated resource methods are defined, check for resources +epprd_rg:process_resources[getReplicatedResources:709] clodmget -q $'name like \'*_REP_RESOURCE\' AND group=epprd_rg' -f value -n HACMPresource +epprd_rg:process_resources[getReplicatedResources:709] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:718] : Verify if any backup profiles are configured and trigger cbm utilities based on that +epprd_rg:process_resources[getReplicatedResources:720] clodmget -q name=BACKUP_ENABLED -f value HACMPresource +epprd_rg:process_resources[getReplicatedResources:720] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:739] echo false +epprd_rg:process_resources[postvg_for_rdisk:871] REPLICATED_RESOURCES=false +epprd_rg:process_resources[postvg_for_rdisk:873] [[ false == true ]] +epprd_rg:process_resources[postvg_for_rdisk:946] return 0 +epprd_rg:process_resources[3684] (( 0 != 0 )) +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-09-28T15:56:22.798598 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=NONE +epprd_rg:process_resources[1] JOB_TYPE=NONE +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ NONE == RELEASE ]] +epprd_rg:process_resources[3360] [[ NONE == ONLINE ]] +epprd_rg:process_resources[3729] break +epprd_rg:process_resources[3740] : If sddsrv was turned off above, turn it back on again +epprd_rg:process_resources[3742] [[ FALSE == TRUE ]] +epprd_rg:process_resources[3747] exit 0 :rg_move[247] : unsetting AM_SYNC_CALLED_BY from $'callers environment as\n: we dont' require it after this point in execution. 
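getReplicatedResources, traced above, reduces to two ODM lookups: replication methods must be declared in HACMPrresmethods, and the group must own a *_REP_RESOURCE entry (a BACKUP_ENABLED profile is also checked). Neither holds here, so it prints false and postvg_for_rdisk does nothing. Sketch:

    RV=false
    if [[ -n $(clodmget -n -f type HACMPrresmethods) ]]
    then
        [[ -n $(clodmget -q "name like '*_REP_RESOURCE' AND group=epprd_rg" \
                -f value -n HACMPresource) ]] && RV=true
    fi
    echo $RV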
:rg_move[250] unset AM_SYNC_CALLED_BY :rg_move[253] [[ -f /tmp/.NFSSTOPPED ]] :rg_move[274] [[ -f /tmp/.RPCLOCKDSTOPPED ]] :rg_move[293] exit 0 Sep 28 2023 15:56:22 EVENT COMPLETED: rg_move epprda 1 RELEASE 0 |2023-09-28T15:56:22|8608|EVENT COMPLETED: rg_move epprda 1 RELEASE 0| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-09-28T15:56:22.905118 :clevlog[amlog_trace:320] echo '|2023-09-28T15:56:22.905118|INFO: rg_move|epprd_rg|epprda|1|RELEASE|0' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :rg_move_release[+68] exit 0 Sep 28 2023 15:56:22 EVENT COMPLETED: rg_move_release epprda 1 0 |2023-09-28T15:56:22|8608|EVENT COMPLETED: rg_move_release epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-28T15:56:23.030977 + echo '|2023-09-28T15:56:23.030977|INFO: rg_move_release|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log Sep 28 2023 15:56:23 EVENT START: rg_move_fence epprda 1 |2023-09-28T15:56:23|8608|EVENT START: rg_move_fence epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-28T15:56:23.228564 + echo '|2023-09-28T15:56:23.228564|INFO: rg_move_fence|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_fence[62] [[ high == high ]] :rg_move_fence[62] version=1.11 :rg_move_fence[63] NODENAME=epprda :rg_move_fence[63] export NODENAME :rg_move_fence[65] set -u :rg_move_fence[67] [ 2 != 2 ] :rg_move_fence[73] set +u :rg_move_fence[75] [[ -z TRUE ]] :rg_move_fence[80] [[ TRUE == TRUE ]] :rg_move_fence[82] LOCAL_NODENAME=epprda :rg_move_fence[83] odmget -qid=1 HACMPgroup :rg_move_fence[83] egrep 'group =' :rg_move_fence[83] awk '{print $3}' :rg_move_fence[83] eval RGNAME='"epprd_rg"' :rg_move_fence[1] RGNAME=epprd_rg +epprd_rg:rg_move_fence[84] GROUPNAME=epprd_rg +epprd_rg:rg_move_fence[85] group_state='$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[86] set +u +epprd_rg:rg_move_fence[87] eval print '$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[1] print ONLINE +epprd_rg:rg_move_fence[87] RG_MOVE_ONLINE=ONLINE +epprd_rg:rg_move_fence[87] export RG_MOVE_ONLINE +epprd_rg:rg_move_fence[88] set -u +epprd_rg:rg_move_fence[89] RG_MOVE_ONLINE=ONLINE +epprd_rg:rg_move_fence[91] set -a +epprd_rg:rg_move_fence[92] clsetenvgrp epprda rg_move epprd_rg '' :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_fence[92] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[93] RC=0 +epprd_rg:rg_move_fence[94] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" 
SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n '
+epprd_rg:rg_move_fence[1] FORCEDOWN_GROUPS=''
+epprd_rg:rg_move_fence[2] RESOURCE_GROUPS=''
+epprd_rg:rg_move_fence[3] HOMELESS_GROUPS=''
+epprd_rg:rg_move_fence[4] HOMELESS_FOLLOWER_GROUPS=''
+epprd_rg:rg_move_fence[5] ERRSTATE_GROUPS=''
+epprd_rg:rg_move_fence[6] PRINCIPAL_ACTIONS=''
+epprd_rg:rg_move_fence[7] ASSOCIATE_ACTIONS=''
+epprd_rg:rg_move_fence[8] AUXILLIARY_ACTIONS=''
+epprd_rg:rg_move_fence[8] SIBLING_GROUPS=''
+epprd_rg:rg_move_fence[9] SIBLING_NODES_BY_GROUP=''
+epprd_rg:rg_move_fence[10] SIBLING_ACQUIRING_GROUPS=''
+epprd_rg:rg_move_fence[11] SIBLING_ACQUIRING_NODES_BY_GROUP=''
+epprd_rg:rg_move_fence[12] SIBLING_RELEASING_GROUPS=''
+epprd_rg:rg_move_fence[13] SIBLING_RELEASING_NODES_BY_GROUP=''
+epprd_rg:rg_move_fence[95] set +a
+epprd_rg:rg_move_fence[96] [ 0 -ne 0 ]
+epprd_rg:rg_move_fence[103] process_resources FENCE
:rg_move_fence[3318] version=1.169
:rg_move_fence[3321] STATUS=0
:rg_move_fence[3322] sddsrv_off=FALSE
:rg_move_fence[3324] true
:rg_move_fence[3326] : call rgpa, and it will tell us what to do next
:rg_move_fence[3328] set -a
:rg_move_fence[3329] clRGPA FENCE
:clRGPA[+47] [[ high = high ]]
:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$
:clRGPA[+49] usingVer=clrgpa
:clRGPA[+54] clrgpa FENCE
2023-09-28T15:56:23.333235 clrgpa
:clRGPA[+55] exit 0
:rg_move_fence[3329] eval JOB_TYPE=NONE
:rg_move_fence[1] JOB_TYPE=NONE
:rg_move_fence[3330] RC=0
:rg_move_fence[3331] set +a
:rg_move_fence[3333] (( 0 != 0 ))
:rg_move_fence[3342] RESOURCE_GROUPS=''
:rg_move_fence[3343] GROUPNAME=''
:rg_move_fence[3343] export GROUPNAME
:rg_move_fence[3353] IS_SERVICE_START=1
:rg_move_fence[3354] IS_SERVICE_STOP=1
:rg_move_fence[3360] [[ NONE == RELEASE ]]
:rg_move_fence[3360] [[ NONE == ONLINE ]]
:rg_move_fence[3729] break
:rg_move_fence[3740] : If sddsrv was turned off above, turn it back on again
:rg_move_fence[3742] [[ FALSE == TRUE ]]
:rg_move_fence[3747] exit 0
+epprd_rg:rg_move_fence[104] : exit status of process_resources FENCE is: 0
+epprd_rg:rg_move_fence[107] [[ TRUE == TRUE ]]
+epprd_rg:rg_move_fence[109] export EVENT_TYPE
+epprd_rg:rg_move_fence[110] echo RELEASE_PRIMARY
RELEASE_PRIMARY
+epprd_rg:rg_move_fence[111] [[ -n '' ]]
+epprd_rg:rg_move_fence[141] exit 0
Sep 28 2023 15:56:23 EVENT COMPLETED: rg_move_fence epprda 1 0
|2023-09-28T15:56:23|8608|EVENT COMPLETED: rg_move_fence epprda 1 0|
+ clcycle clavailability.log
+ 1> /dev/null 2>& 1
+ cltime
+ DATE=2023-09-28T15:56:23.426652
+ echo '|2023-09-28T15:56:23.426652|INFO: rg_move_fence|epprd_rg|epprda|1|0'
+ 1>> /var/hacmp/availability/clavailability.log

PowerHA SystemMirror Event Summary
----------------------------------------------------------------------------
Serial number for this event: 8608
Event: TE_RG_MOVE_RELEASE
Start time: Thu Sep 28 15:55:45 2023
End time: Thu Sep 28 15:56:23 2023

Action:                                          Resource:  Script Name:
----------------------------------------------------------------------------
Releasing resource group: epprd_rg                          process_resources
Search on: Thu.Sep.28.15:55:46.KORST.2023.process_resources.epprd_rg.ref
Releasing resource: All_servers                             stop_server
Search on: Thu.Sep.28.15:55:46.KORST.2023.stop_server.All_servers.epprd_rg.ref
Resource offline: All_nonerror_servers                      stop_server
Search on: Thu.Sep.28.15:55:57.KORST.2023.stop_server.All_nonerror_servers.epprd_rg.ref
Releasing resource: All_nfs_mounts                          cl_deactivate_nfs
Search on: Thu.Sep.28.15:55:58.KORST.2023.cl_deactivate_nfs.All_nfs_mounts.epprd_rg.ref
Resource offline: All_nonerror_nfs_mounts                   cl_deactivate_nfs
Search on: Thu.Sep.28.15:56:02.KORST.2023.cl_deactivate_nfs.All_nonerror_nfs_mounts.epprd_rg.ref
Releasing resource: All_exports                             cl_unexport_fs
Search on: Thu.Sep.28.15:56:02.KORST.2023.cl_unexport_fs.All_exports.epprd_rg.ref
Resource offline: All_nonerror_exports                      cl_unexport_fs
Search on: Thu.Sep.28.15:56:02.KORST.2023.cl_unexport_fs.All_nonerror_exports.epprd_rg.ref
Releasing resource: All_filesystems                         cl_deactivate_fs
Search on: Thu.Sep.28.15:56:02.KORST.2023.cl_deactivate_fs.All_filesystems.epprd_rg.ref
Resource offline: All_non_error_filesystems                 cl_deactivate_fs
Search on: Thu.Sep.28.15:56:18.KORST.2023.cl_deactivate_fs.All_non_error_filesystems.epprd_rg.ref
Releasing resource: All_volume_groups                       cl_deactivate_vgs
Search on: Thu.Sep.28.15:56:18.KORST.2023.cl_deactivate_vgs.All_volume_groups.epprd_rg.ref
Resource offline: All_nonerror_volume_groups                cl_deactivate_vgs
Search on: Thu.Sep.28.15:56:19.KORST.2023.cl_deactivate_vgs.All_nonerror_volume_groups.epprd_rg.ref
Releasing resource: All_service_addrs                       release_service_addr
Search on: Thu.Sep.28.15:56:19.KORST.2023.release_service_addr.All_service_addrs.epprd_rg.ref
Resource offline: All_nonerror_service_addrs                release_service_addr
Search on: Thu.Sep.28.15:56:20.KORST.2023.release_service_addr.All_nonerror_service_addrs.epprd_rg.ref
Resource group offline: epprd_rg                            process_resources
Search on: Thu.Sep.28.15:56:22.KORST.2023.process_resources.epprd_rg.ref
----------------------------------------------------------------------------
|EVENT_SUMMARY_START|TE_RG_MOVE_RELEASE|2023-09-28T15:55:45|2023-09-28T15:56:23|8608|
|EV_SUM_SEARCHON_STR|Thu.Sep.28.15:55:46.KORST.2023.process_resources.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Thu.Sep.28.15:55:46.KORST.2023.stop_server.All_servers.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Thu.Sep.28.15:55:57.KORST.2023.stop_server.All_nonerror_servers.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Thu.Sep.28.15:55:58.KORST.2023.cl_deactivate_nfs.All_nfs_mounts.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Thu.Sep.28.15:56:02.KORST.2023.cl_deactivate_nfs.All_nonerror_nfs_mounts.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Thu.Sep.28.15:56:02.KORST.2023.cl_unexport_fs.All_exports.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Thu.Sep.28.15:56:02.KORST.2023.cl_unexport_fs.All_nonerror_exports.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Thu.Sep.28.15:56:02.KORST.2023.cl_deactivate_fs.All_filesystems.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Thu.Sep.28.15:56:18.KORST.2023.cl_deactivate_fs.All_non_error_filesystems.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Thu.Sep.28.15:56:18.KORST.2023.cl_deactivate_vgs.All_volume_groups.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Thu.Sep.28.15:56:19.KORST.2023.cl_deactivate_vgs.All_nonerror_volume_groups.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Thu.Sep.28.15:56:19.KORST.2023.release_service_addr.All_service_addrs.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Thu.Sep.28.15:56:20.KORST.2023.release_service_addr.All_nonerror_service_addrs.epprd_rg.ref.ref|
|EV_SUM_SEARCHON_STR|Thu.Sep.28.15:56:22.KORST.2023.process_resources.epprd_rg.ref.ref|
|EVENT_SUMMARY_END|

PowerHA SystemMirror Event Preamble
----------------------------------------------------------------------------
Serial number for this event: 8611
No resource state change initiated by the cluster manager as a result of this event
---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_JOIN_NETWORK|2023-09-28T15:56:24|8611| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Sep 28 2023 15:56:24 EVENT START: network_up epprda net_ether_01 |2023-09-28T15:56:24|8611|EVENT START: network_up epprda net_ether_01| :network_up[+66] version=%I% :network_up[+69] set -a :network_up[+70] cllsparam -n epprda :network_up[+70] eval NODE_NAME=epprda VERBOSE_LOGGING=high PS4='${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] ' DEBUG_LEVEL=Standard LC_ALL='C' :network_up[+70] NODE_NAME=epprda VERBOSE_LOGGING=high PS4=${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] DEBUG_LEVEL=Standard LC_ALL=C :network_up[+71] set +a :network_up[+73] STATUS=0 :network_up[+75] [ 2 -ne 2 ] :network_up[+81] [[ epprda == epprda ]] :network_up[+82] amlog_trace 8611|epprda|net_ether_01 :network_up[+61] clcycle clavailability.log :network_up[+61] 1> /dev/null 2>& 1 :network_up[+61] :network_up[+61] cltime DATE=2023-09-28T15:56:25.056827 :network_up[+61] echo |2023-09-28T15:56:25.056827|INFO: 8611|epprda|net_ether_01 :network_up[+61] 1>> /var/hacmp/availability/clavailability.log :network_up[+84] export NETWORKNAME=net_ether_01 :network_up[+89] [[ epprda == epprda ]] :network_up[+90] amlog_trace 8611|epprda|net_ether_01 :network_up[+61] clcycle clavailability.log :network_up[+61] 1> /dev/null 2>& 1 :network_up[+61] :network_up[+61] cltime DATE=2023-09-28T15:56:25.084839 :network_up[+61] echo |2023-09-28T15:56:25.084839|INFO: 8611|epprda|net_ether_01 :network_up[+61] 1>> /var/hacmp/availability/clavailability.log :network_up[+92] exit 0 Sep 28 2023 15:56:25 EVENT COMPLETED: network_up epprda net_ether_01 0 |2023-09-28T15:56:25|8611|EVENT COMPLETED: network_up epprda net_ether_01 0| Sep 28 2023 15:56:25 EVENT START: network_up_complete epprda net_ether_01 |2023-09-28T15:56:25|8611|EVENT START: network_up_complete epprda net_ether_01| :network_up_complete[+68] version=%I% :network_up_complete[+72] [ 2 -ne 2 ] :network_up_complete[+78] [[ epprda == epprda ]] :network_up_complete[+79] amlog_trace 8611|epprda|net_ether_01 :network_up_complete[+61] clcycle clavailability.log :network_up_complete[+61] 1> /dev/null 2>& 1 :network_up_complete[+61] :network_up_complete[+61] cltime DATE=2023-09-28T15:56:25.346056 :network_up_complete[+61] echo |2023-09-28T15:56:25.346056|INFO: 8611|epprda|net_ether_01 :network_up_complete[+61] 1>> /var/hacmp/availability/clavailability.log :network_up_complete[+82] NODENAME=epprda :network_up_complete[+83] NETWORK=net_ether_01 :network_up_complete[+84] export NETWORKNAME=net_ether_01 :network_up_complete[+86] [[ -z ]] :network_up_complete[+88] EMULATE=REAL :network_up_complete[+90] set -u :network_up_complete[+96] STATUS=0 :network_up_complete[+100] odmget HACMPnode :network_up_complete[+100] grep name = :network_up_complete[+100] sort :network_up_complete[+100] uniq :network_up_complete[+100] wc -l :network_up_complete[+100] [ 2 -eq 2 ] :network_up_complete[+102] :network_up_complete[+102] odmget HACMPgroup :network_up_complete[+102] grep group = :network_up_complete[+102] awk {print $3} :network_up_complete[+102] sed s/"//g RESOURCE_GROUPS=epprd_rg :network_up_complete[+106] :network_up_complete[+106] odmget -q group=epprd_rg AND name=EXPORT_FILESYSTEM 
HACMPresource :network_up_complete[+106] grep value :network_up_complete[+106] awk {print $3} :network_up_complete[+106] sed s/"//g EXPORTLIST=/board_org /sapmnt/EPP :network_up_complete[+107] [ -n /board_org /sapmnt/EPP ] :network_up_complete[+109] [ REAL = EMUL ] :network_up_complete[+114] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+189] TWIN_NAME=epprds :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+196] rpcinfo -p :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw status :cl_update_statd(0)[+207] : Get the current twin, if there is one :cl_update_statd(0)[+209] :cl_update_statd(0)[+209] nfso -H sm_gethost :cl_update_statd(0)[+209] 2>& 1 CURTWIN=epprds :cl_update_statd(0)[+210] RC=0 :cl_update_statd(0)[+212] [[ -z true ]] :cl_update_statd(0)[+212] [[ -z epprds ]] :cl_update_statd(0)[+225] : Get the interface to the twin node :cl_update_statd(0)[+227] :cl_update_statd(0)[+227] get_node_ip epprds :cl_update_statd(0)[+9] (( 1 != 1 )) :cl_update_statd(0)[+15] Twin_Name=epprds :cl_update_statd(0)[+16] NewTwin= :cl_update_statd(0)[+19] : Get the Interface details for every interface on the twin node :cl_update_statd(0)[+20] : Reject interfaces on nodes that are not public boot addresses :cl_update_statd(0)[+21] : because those are the only ones we have state information for :cl_update_statd(0)[+23] :cl_update_statd(0)[+23] cllsif -J ~ -Sw -i epprda :cl_update_statd(0)[+23] LC_ALL=C LOCAL_NETWORK_INFO=epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+24] cllsif -J ~ -Sw -i epprds :cl_update_statd(0)[+24] LC_ALL=C :cl_update_statd(0)[+25] read adapt type network net_type attrib node ip_addr skip interface skip netmask skip skip prefix ip_family :cl_update_statd(0)[+25] IFS=~ :cl_update_statd(0)[+25] [[ public != public ]] :cl_update_statd(0)[+25] [[ boot != boot ]] :cl_update_statd(0)[+33] : Find the state of this candidate :cl_update_statd(0)[+33] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+37] :cl_update_statd(0)[+37] print 61.81.244.123 :cl_update_statd(0)[+37] tr ./ xx addr=i61x81x244x123_epprds :cl_update_statd(0)[+43] eval candidate_state=${i61x81x244x123_epprds:-down} :cl_update_statd(0)[+43] candidate_state=UP :cl_update_statd(0)[+46] : If state is UP, check to see if this node can talk to it :cl_update_statd(0)[+46] [[ UP == UP ]] :cl_update_statd(0)[+50] ping -w 5 -c 1 -q 61.81.244.123 :cl_update_statd(0)[+50] 1> /dev/null :cl_update_statd(0)[+61] echo epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+61] tr \n :cl_update_statd(0)[+62] read lcl_adapt lcl_type lcl_network lcl_net_type lcl_attrib lcl_node lcl_ip_addr skip lcl_interface skip lcl_netmask skip skip lcl_prefix lcl_ip_family :cl_update_statd(0)[+62] IFS=~ :cl_update_statd(0)[+62] [[ net_ether_01 
!= net_ether_01 ]] :cl_update_statd(0)[+62] [[ boot != boot ]] :cl_update_statd(0)[+62] [[ public != public ]] :cl_update_statd(0)[+62] [[ AF_INET != AF_INET ]] :cl_update_statd(0)[+62] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+71] :cl_update_statd(0)[+71] print 61.81.244.134 :cl_update_statd(0)[+71] tr ./ xx addr=i61x81x244x134_epprda :cl_update_statd(0)[+77] eval lcl_candidate_state=${i61x81x244x134_epprda:-down} :cl_update_statd(0)[+77] lcl_candidate_state=UP :cl_update_statd(0)[+77] [[ UP == UP ]] :cl_update_statd(0)[+81] : epprds is on the same network as an interface that is up :cl_update_statd(0)[+82] : on the local node, and the attributes match. :cl_update_statd(0)[+84] NewTwin=epprds :cl_update_statd(0)[+85] break :cl_update_statd(0)[+85] [[ -n epprds ]] :cl_update_statd(0)[+91] break :cl_update_statd(0)[+91] [[ -z epprds ]] :cl_update_statd(0)[+100] echo epprds :cl_update_statd(0)[+101] return 0 NEWTWIN=epprds :cl_update_statd(0)[+227] [[ -z epprds ]] :cl_update_statd(0)[+227] [[ epprds != epprds ]] :cl_update_statd(0)[+259] : RC is actually 0 :cl_update_statd(0)[+266] return 0 :network_up_complete[+115] [ 0 -ne 0 ] :network_up_complete[+120] break :network_up_complete[+125] [[ epprda == epprda ]] :network_up_complete[+131] :network_up_complete[+131] odmget -qname=net_ether_01 HACMPnetwork :network_up_complete[+131] awk $1 == "alias" {print $3} :network_up_complete[+131] sed s/"//g ALIASING=1 :network_up_complete[+131] [[ 1 == 1 ]] :network_up_complete[+133] cl_configure_persistent_address aliasing_network_up -n net_ether_01 :cl_configure_persistent_address[1344] version=1.56.1.4 :cl_configure_persistent_address[1346] cl_get_path -S :cl_configure_persistent_address[1346] OP_SEP='~' :cl_configure_persistent_address[1349] get_local_nodename :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. 
:get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :cl_configure_persistent_address[1349] LOCALNODENAME=epprda :cl_configure_persistent_address[1354] [[ -z epprda ]] :cl_configure_persistent_address[1356] NETWORK='' :cl_configure_persistent_address[1357] ALIVE_IF='' :cl_configure_persistent_address[1358] FAILED_IF='' :cl_configure_persistent_address[1359] FAILED_ADDRESS='' :cl_configure_persistent_address[1360] UPDATE_CLSTRMGR=1 :cl_configure_persistent_address[1361] CHECK_HA_ALIVE=1 :cl_configure_persistent_address[1362] RESTORE_ROUTES=/usr/es/sbin/cluster/.pers_restore_routes :cl_configure_persistent_address[1363] RC=0 :cl_configure_persistent_address[1364] B_FLAG=0 :cl_configure_persistent_address[1366] ACTION=aliasing_network_up :cl_configure_persistent_address[1367] shift :cl_configure_persistent_address[1369] getopt n:a:f:i:dPB -n net_ether_01 :cl_configure_persistent_address[1369] set -- -n net_ether_01 -- :cl_configure_persistent_address[1371] (( 0 != 0 )) :cl_configure_persistent_address[1371] [[ -z aliasing_network_up ]] :cl_configure_persistent_address[1376] [[ -n != -- ]] :cl_configure_persistent_address[1379] NETWORK=net_ether_01 :cl_configure_persistent_address[1380] shift :cl_configure_persistent_address[1380] shift :cl_configure_persistent_address[1376] [[ -- != -- ]] :cl_configure_persistent_address[1418] shift :cl_configure_persistent_address[1422] [[ aliasing_network_up == up ]] :cl_configure_persistent_address[1520] [[ aliasing_network_up == swap ]] :cl_configure_persistent_address[1667] [[ aliasing_network_up == fail_boot ]] :cl_configure_persistent_address[1830] [[ aliasing_network_up == aliasing_network_up ]] :cl_configure_persistent_address[1831] [[ -z net_ether_01 ]] :cl_configure_persistent_address[1837] isAliasingNetwork net_ether_01 :cl_configure_persistent_address[isAliasingNetwork:386] PS4_FUNC=isAliasingNetwork :cl_configure_persistent_address[isAliasingNetwork:386] typeset PS4_FUNC :cl_configure_persistent_address[isAliasingNetwork:387] [[ high == high ]] :cl_configure_persistent_address[isAliasingNetwork:387] set -x :cl_configure_persistent_address[isAliasingNetwork:389] NETWORK=net_ether_01 :cl_configure_persistent_address[isAliasingNetwork:391] odmget -qname=net_ether_01 HACMPnetwork :cl_configure_persistent_address[isAliasingNetwork:392] awk '$1 == "alias" {print $3}' :cl_configure_persistent_address[isAliasingNetwork:393] sed 's/"//g' :cl_configure_persistent_address[isAliasingNetwork:391] print 1 :cl_configure_persistent_address[1837] [[ 1 != 1 ]] :cl_configure_persistent_address[1842] cllsif -J '~' -Spi epprda :cl_configure_persistent_address[1842] awk -F~ '$2 == "persistent" && $3 == "net_ether_01" {print $1}' :cl_configure_persistent_address[1842] PERSISTENT='' :cl_configure_persistent_address[1844] [[ -z '' ]] :cl_configure_persistent_address[1846] exit 0 :network_up_complete[+141] :network_up_complete[+141] cl_rrmethods2call net_initialization :cl_rrmethods2call[56] version=%I% :cl_rrmethods2call[84] RRMETHODS='' :cl_rrmethods2call[85] NEED_RR_ENV_VARS=no :cl_rrmethods2call[90] : The network methods are returned if the Network type is XD_data. 
:cl_rrmethods2call[92] clodmget -qname=net_ether_01 -f nimname -n HACMPnetwork :cl_rrmethods2call[92] RRNET=ether :cl_rrmethods2call[94] [[ ether == XD_data ]] :cl_rrmethods2call[98] return 0 METHODS= :network_up_complete[+163] :network_up_complete[+163] clodmget -n -q name=MOUNT_FILESYSTEM -f group HACMPresource CROSSMOUNTS=epprd_rg :network_up_complete[+165] [ -n epprd_rg -a epprda = epprda ] :network_up_complete[+168] : Remount any NFS cross mount if required :network_up_complete[+174] :network_up_complete[+174] clodmget -n -f group HACMPgroup RESOURCE_GROUPS=epprd_rg :network_up_complete[+185] :network_up_complete[+185] clodmget -n -q name=MOUNT_FILESYSTEM and group=epprd_rg -f value HACMPresource MOUNT_FILESYSTEM=/board;/board_org :network_up_complete[+185] [[ -z /board;/board_org ]] :network_up_complete[+189] IN_RG=false :network_up_complete[+189] clodmget -n -q group=epprd_rg -f nodes HACMPgroup :network_up_complete[+189] [[ epprda == epprda ]] :network_up_complete[+192] IN_RG=true :network_up_complete[+192] [[ epprds == epprda ]] :network_up_complete[+192] [[ true == false ]] :network_up_complete[+197] :network_up_complete[+197] clRGinfo -s epprd_rg clRGinfo[431]: version I clRGinfo[517]: Number of resource groups = 1 clRGinfo[562]: cluster epprda_cluster is version = 22 clRGinfo[1439]: IPC target host name is 'localhost' clRGinfo[685]: Current group is 'epprd_rg' :network_up_complete[+197] awk -F : { if ( $2 == "ONLINE" ) print $3 } get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_for_short_output_table get primary state info for state 4 get secondary state info for state 4 getPreviousStateString: Primary=4, Sec=-1 get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_for_short_output_table get primary state info for state 4 get secondary state info for state 4 getPreviousStateString: Primary=4, Sec=-1 NFS_HOST= :network_up_complete[+197] [[ -z ]] :network_up_complete[+198] continue :network_up_complete[+257] [[ epprda == epprda ]] :network_up_complete[+257] [[ 0 -ne 0 ]] :network_up_complete[+262] amlog_trace 8611|epprda|net_ether_01 :network_up_complete[+61] clcycle clavailability.log :network_up_complete[+61] 1> /dev/null 2>& 1 :network_up_complete[+61] :network_up_complete[+61] cltime DATE=2023-09-28T15:56:25.542056 :network_up_complete[+61] echo |2023-09-28T15:56:25.542056|INFO: 8611|epprda|net_ether_01 :network_up_complete[+61] 1>> /var/hacmp/availability/clavailability.log :network_up_complete[+265] exit 0 Sep 28 2023 15:56:25 EVENT COMPLETED: network_up_complete epprda net_ether_01 0 |2023-09-28T15:56:25|8611|EVENT COMPLETED: network_up_complete epprda net_ether_01 0| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 8610 No resource state change initiated by the cluster manager as a result of this event ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_FAIL_ADAPTER|2023-09-28T15:56:27|8610| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Sep 28 2023 15:56:27 EVENT START: fail_interface epprda 61.81.244.134 |2023-09-28T15:56:27|8610|EVENT START: fail_interface epprda 61.81.244.134| :fail_interface[+64] version=%I% :fail_interface[+66] :fail_interface[+66] cl_get_path -S OP_SEP=~ :fail_interface[+68] [ 2 -ne 2 ] :fail_interface[+74] NODENAME=epprda :fail_interface[+75] 
ADDR=61.81.244.134 :fail_interface[+76] PREFIX_LEN= :fail_interface[+77] ADDR_FAMILY= :fail_interface[+79] set -u :fail_interface[+81] :fail_interface[+81] dspmsg scripts.cat 8062 Interface 61.81.244.134 has failed on node epprda.\n 61.81.244.134 epprda MSG=Interface 61.81.244.134 has failed on node epprda. :fail_interface[+82] echo Interface 61.81.244.134 has failed on node epprda. :fail_interface[+82] 1> /dev/console :fail_interface[+84] [[ epprda = epprda ]] :fail_interface[+88] :fail_interface[+88] cllsif -J ~ -Sn 61.81.244.134 :fail_interface[+88] cut -d~ -f3 NETWORK=net_ether_01 :fail_interface[+91] :fail_interface[+91] odmget -qname=net_ether_01 HACMPnetwork :fail_interface[+91] awk $1 == "alias" {print $3} :fail_interface[+91] sed s/"//g ALIASING=1 :fail_interface[+91] [[ 1 = 1 ]] :fail_interface[+96] set +u :fail_interface[+97] saveNSORDER=UNDEFINED :fail_interface[+98] set -u :fail_interface[+99] NSORDER=local :fail_interface[+99] export NSORDER :fail_interface[+100] netstat -in Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll en0 1500 link#2 fa.e6.13.4e.a9.20 2088659120 0 1461165543 0 0 en0 1500 61.81.244 61.81.244.134 2088659120 0 1461165543 0 0 lo0 16896 link#1 1996315400 0 1996315400 0 0 lo0 16896 127 127.0.0.1 1996315400 0 1996315400 0 0 lo0 16896 ::1%1 1996315400 0 1996315400 0 0 :fail_interface[+101] netstat -rnC Routing tables Destination Gateway Flags Wt Policy If Cost Config_Cost Route tree for Protocol Family 2 (Internet): default 61.81.244.1 UG 1 - en0 0 0 61.81.244.0 61.81.244.134 UHSb 1 - en0 0 0 => 61.81.244/24 61.81.244.134 U 1 - en0 0 0 61.81.244.134 127.0.0.1 UGHS 1 - lo0 0 0 61.81.244.255 61.81.244.134 UHSb 1 - en0 0 0 127/8 127.0.0.1 U 1 - lo0 0 0 Route tree for Protocol Family 24 (Internet v6): ::1%1 ::1%1 UH 1 - lo0 0 0 :fail_interface[+102] cl_configure_persistent_address fail_boot -i 61.81.244.134 -n net_ether_01 :cl_configure_persistent_address[1344] version=1.56.1.4 :cl_configure_persistent_address[1346] cl_get_path -S :cl_configure_persistent_address[1346] OP_SEP='~' :cl_configure_persistent_address[1349] get_local_nodename :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. 
:get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :cl_configure_persistent_address[1349] LOCALNODENAME=epprda :cl_configure_persistent_address[1354] [[ -z epprda ]] :cl_configure_persistent_address[1356] NETWORK='' :cl_configure_persistent_address[1357] ALIVE_IF='' :cl_configure_persistent_address[1358] FAILED_IF='' :cl_configure_persistent_address[1359] FAILED_ADDRESS='' :cl_configure_persistent_address[1360] UPDATE_CLSTRMGR=1 :cl_configure_persistent_address[1361] CHECK_HA_ALIVE=1 :cl_configure_persistent_address[1362] RESTORE_ROUTES=/usr/es/sbin/cluster/.pers_restore_routes :cl_configure_persistent_address[1363] RC=0 :cl_configure_persistent_address[1364] B_FLAG=0 :cl_configure_persistent_address[1366] ACTION=fail_boot :cl_configure_persistent_address[1367] shift :cl_configure_persistent_address[1369] getopt n:a:f:i:dPB -i 61.81.244.134 -n net_ether_01 :cl_configure_persistent_address[1369] set -- -i 61.81.244.134 -n net_ether_01 -- :cl_configure_persistent_address[1371] (( 0 != 0 )) :cl_configure_persistent_address[1371] [[ -z fail_boot ]] :cl_configure_persistent_address[1376] [[ -i != -- ]] :cl_configure_persistent_address[1392] FAILED_ADDRESS=61.81.244.134 :cl_configure_persistent_address[1393] shift :cl_configure_persistent_address[1393] shift :cl_configure_persistent_address[1376] [[ -n != -- ]] :cl_configure_persistent_address[1379] NETWORK=net_ether_01 :cl_configure_persistent_address[1380] shift :cl_configure_persistent_address[1380] shift :cl_configure_persistent_address[1376] [[ -- != -- ]] :cl_configure_persistent_address[1418] shift :cl_configure_persistent_address[1422] [[ fail_boot == up ]] :cl_configure_persistent_address[1520] [[ fail_boot == swap ]] :cl_configure_persistent_address[1667] [[ fail_boot == fail_boot ]] :cl_configure_persistent_address[1668] [[ -z 61.81.244.134 ]] :cl_configure_persistent_address[1668] [[ -z net_ether_01 ]] :cl_configure_persistent_address[1672] clgetif -a 61.81.244.134 :cl_configure_persistent_address[1672] 2> /dev/null :cl_configure_persistent_address[1672] awk '{print $1}' :cl_configure_persistent_address[1672] IF=en0 :cl_configure_persistent_address[1673] cllsif -J '~' -Sn 61.81.244.134 :cl_configure_persistent_address[1673] cut -d~ -f3 :cl_configure_persistent_address[1673] NETWORK=net_ether_01 :cl_configure_persistent_address[1677] isAliasingNetwork net_ether_01 :cl_configure_persistent_address[isAliasingNetwork:386] PS4_FUNC=isAliasingNetwork :cl_configure_persistent_address[isAliasingNetwork:386] typeset PS4_FUNC :cl_configure_persistent_address[isAliasingNetwork:387] [[ high == high ]] :cl_configure_persistent_address[isAliasingNetwork:387] set -x :cl_configure_persistent_address[isAliasingNetwork:389] NETWORK=net_ether_01 :cl_configure_persistent_address[isAliasingNetwork:391] odmget -qname=net_ether_01 HACMPnetwork :cl_configure_persistent_address[isAliasingNetwork:392] awk '$1 == "alias" {print $3}' :cl_configure_persistent_address[isAliasingNetwork:393] sed 's/"//g' :cl_configure_persistent_address[isAliasingNetwork:391] print 1 :cl_configure_persistent_address[1677] [[ 1 != 1 ]] :cl_configure_persistent_address[1682] cllsif -J '~' -Spi epprda :cl_configure_persistent_address[1682] awk -F~ '$2 == "persistent" && $3 == "net_ether_01" {print $1}' :cl_configure_persistent_address[1682] PERSISTENT='' :cl_configure_persistent_address[1684] [[ -z '' ]] :cl_configure_persistent_address[1686] exit 0 
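Both the aliasing_network_up and fail_boot passes through cl_configure_persistent_address end the same way here: no persistent label is defined on net_ether_01 for epprda, so PERSISTENT comes back empty and the script exits 0 without touching any interface. The decisive check is the cllsif query traced above; a minimal standalone sketch of it, assuming the PowerHA utilities are on the PATH and using the node and network names from this log:

# Does node epprda carry a persistent label on net_ether_01?
PERSISTENT=$(cllsif -J '~' -Spi epprda | awk -F'~' '$2 == "persistent" && $3 == "net_ether_01" {print $1}')
if [[ -z $PERSISTENT ]]; then
    echo "no persistent label on net_ether_01 - nothing to configure"
fi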
:fail_interface[+106] :fail_interface[+106] clgetif -n 61.81.244.134 :fail_interface[+106] LC_ALL=C NETMASK=255.255.255.0 :fail_interface[+107] :fail_interface[+107] clgetif -a 61.81.244.134 :fail_interface[+107] LC_ALL=C IF1=en0 :fail_interface[+108] BOOT1=61.81.244.134 :fail_interface[+111] :fail_interface[+111] cllsif -J ~ -Si epprda :fail_interface[+111] awk -F~ -v net=net_ether_01 -v if1=en0 ($2=="boot" && \ $3==net && $9!=if1) {printf("%s\n",$7)} BOOT2= :fail_interface[+111] [[ -n ]] :fail_interface[+111] [[ UNDEFINED != UNDEFINED ]] :fail_interface[+179] export NSORDER= :fail_interface[+184] exit 0 Sep 28 2023 15:56:27 EVENT COMPLETED: fail_interface epprda 61.81.244.134 0 |2023-09-28T15:56:27|8610|EVENT COMPLETED: fail_interface epprda 61.81.244.134 0| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 8613 No resource state change initiated by the cluster manager as a result of this event ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_JOIN_ADAPTER|2023-09-28T15:56:30|8613| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Sep 28 2023 15:56:30 EVENT START: join_interface epprda 61.81.244.134 |2023-09-28T15:56:30|8613|EVENT START: join_interface epprda 61.81.244.134| :join_interface[+64] version=%I% :join_interface[+67] :join_interface[+67] cl_get_path -S OP_SEP=~ :join_interface[+69] [ 2 -ne 2 ] :join_interface[+75] NODENAME=epprda :join_interface[+76] ADDR=61.81.244.134 :join_interface[+77] BOOTIF= :join_interface[+78] ADDR_FAMILY= :join_interface[+79] PREFIX_LEN= :join_interface[+81] set -u :join_interface[+82] [[ epprda = epprda ]] :join_interface[+88] :join_interface[+88] cllsif -J ~ -Si epprda :join_interface[+88] awk -F~ -v bootif=61.81.244.134 ($2=="boot" && $7==bootif) \ {printf("%s\n",$9)} BOOTIF=en0 :join_interface[+88] [[ -n en0 ]] :join_interface[+91] cllsif -J ~ -Sn 61.81.244.134 :join_interface[+91] cut -d~ -f12 :join_interface[+92] tr ~ :join_interface[+92] read IF_ALIAS :join_interface[+92] [[ -n ]] :join_interface[+121] :join_interface[+121] dspmsg scripts.cat 8064 Interface 61.81.244.134 is now available on node epprda.\n 61.81.244.134 epprda MSG=Interface 61.81.244.134 is now available on node epprda. :join_interface[+122] echo Interface 61.81.244.134 is now available on node epprda. 
:join_interface[+122] 1> /dev/console :join_interface[+124] exit 0 Sep 28 2023 15:56:30 EVENT COMPLETED: join_interface epprda 61.81.244.134 0 |2023-09-28T15:56:30|8613|EVENT COMPLETED: join_interface epprda 61.81.244.134 0| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 8612 No resource state change initiated by the cluster manager as a result of this event ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_JOIN_ADAPTER|2023-09-28T15:56:32|8612| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Sep 28 2023 15:56:32 EVENT START: join_interface epprda 61.81.244.156 |2023-09-28T15:56:32|8612|EVENT START: join_interface epprda 61.81.244.156| :join_interface[+64] version=%I% :join_interface[+67] :join_interface[+67] cl_get_path -S OP_SEP=~ :join_interface[+69] [ 2 -ne 2 ] :join_interface[+75] NODENAME=epprda :join_interface[+76] ADDR=61.81.244.156 :join_interface[+77] BOOTIF= :join_interface[+78] ADDR_FAMILY= :join_interface[+79] PREFIX_LEN= :join_interface[+81] set -u :join_interface[+82] [[ epprda = epprda ]] :join_interface[+88] :join_interface[+88] cllsif -J ~ -Si epprda :join_interface[+88] awk -F~ -v bootif=61.81.244.156 ($2=="boot" && $7==bootif) \ {printf("%s\n",$9)} BOOTIF= :join_interface[+88] [[ -n ]] :join_interface[+121] :join_interface[+121] dspmsg scripts.cat 8064 Interface 61.81.244.156 is now available on node epprda.\n 61.81.244.156 epprda MSG=Interface 61.81.244.156 is now available on node epprda. :join_interface[+122] echo Interface 61.81.244.156 is now available on node epprda. :join_interface[+122] 1> /dev/console :join_interface[+124] exit 0 Sep 28 2023 15:56:32 EVENT COMPLETED: join_interface epprda 61.81.244.156 0 |2023-09-28T15:56:32|8612|EVENT COMPLETED: join_interface epprda 61.81.244.156 0| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 8609 No resource state change initiated by the cluster manager as a result of this event ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_FAIL_NODE_DEP_COMPLETE|2023-09-28T15:56:34|8609| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Sep 28 2023 15:56:34 EVENT START: node_down_complete epprda |2023-09-28T15:56:34|8609|EVENT START: node_down_complete epprda| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-28T15:56:34.758779 + echo '|2023-09-28T15:56:34.758779|INFO: node_down_complete|epprda' + 1>> /var/hacmp/availability/clavailability.log :node_down_complete[107] version=%I% :node_down_complete[111] : Pick up input :node_down_complete[113] NODENAME=epprda :node_down_complete[113] export NODENAME :node_down_complete[114] PARAM='' :node_down_complete[114] export PARAM :node_down_complete[116] NODE_HALT_CONTROL_FILE=/usr/es/sbin/cluster/etc/ha_nodehalt.lock :node_down_complete[125] STATUS=0 :node_down_complete[127] set -u :node_down_complete[129] (( 1 < 1 )) :node_down_complete[136] : serial number for this event is 8609 :node_down_complete[139] [[ '' == forced ]] :node_down_complete[151] : if RG_DEPENDENCIES is set to false by the cluster manager, :node_down_complete[152] : then resource groups will be processed via clsetenvgrp :node_down_complete[154] [[ '' != forced ]] :node_down_complete[154] [[ TRUE == FALSE ]] :node_down_complete[184] : For each participating resource group, serially process the resources 
:node_down_complete[186] LOCALCOMP=N :node_down_complete[189] : if RG_DEPENDENCIES is set to false by the cluster manager, :node_down_complete[190] : then resource groups will be processed via clsetenvgrp :node_down_complete[192] [[ '' != forced ]] :node_down_complete[192] [[ TRUE == FALSE ]] :node_down_complete[232] [[ '' != forced ]] :node_down_complete[232] [[ epprda == epprda ]] :node_down_complete[235] : Call ss-unload replicated resource methods if they are defined :node_down_complete[237] cl_rrmethods2call ss_unload :cl_rrmethods2call[56] version=%I% :cl_rrmethods2call[84] RRMETHODS='' :cl_rrmethods2call[85] NEED_RR_ENV_VARS=no :cl_rrmethods2call[104] : The load and unload methods if defined are returned on the :cl_rrmethods2call[105] : local node :cl_rrmethods2call[107] [[ epprda == epprda ]] :cl_rrmethods2call[109] NEED_RR_ENV_VARS=yes :cl_rrmethods2call[129] : Set the '*_REP_RESOURCE' variables if needed. :cl_rrmethods2call[131] [[ yes == yes ]] :cl_rrmethods2call[133] cllsres :cl_rrmethods2call[133] 2> /dev/null :cl_rrmethods2call[133] eval APPLICATIONS='"epprd_app"' EXPORT_FILESYSTEM='"/board_org' '/sapmnt/EPP"' FILESYSTEM='""' FORCED_VARYON='"false"' FSCHECK_TOOL='"fsck"' FS_BEFORE_IPADDR='"false"' MOUNT_FILESYSTEM='"/board;/board_org"' RECOVERY_METHOD='"sequential"' SERVICE_LABEL='"epprd"' SSA_DISK_FENCING='"false"' VG_AUTO_IMPORT='"false"' VOLUME_GROUP='"datavg"' USERDEFINED_RESOURCES='""' :cl_rrmethods2call[1] APPLICATIONS=epprd_app :cl_rrmethods2call[1] EXPORT_FILESYSTEM='/board_org /sapmnt/EPP' :cl_rrmethods2call[1] FILESYSTEM='' :cl_rrmethods2call[1] FORCED_VARYON=false :cl_rrmethods2call[1] FSCHECK_TOOL=fsck :cl_rrmethods2call[1] FS_BEFORE_IPADDR=false :cl_rrmethods2call[1] MOUNT_FILESYSTEM='/board;/board_org' :cl_rrmethods2call[1] RECOVERY_METHOD=sequential :cl_rrmethods2call[1] SERVICE_LABEL=epprd :cl_rrmethods2call[1] SSA_DISK_FENCING=false :cl_rrmethods2call[1] VG_AUTO_IMPORT=false :cl_rrmethods2call[1] VOLUME_GROUP=datavg :cl_rrmethods2call[1] USERDEFINED_RESOURCES='' :cl_rrmethods2call[137] [[ -n '' ]] :cl_rrmethods2call[142] [[ -n '' ]] :cl_rrmethods2call[147] [[ -n '' ]] :cl_rrmethods2call[152] [[ -n '' ]] :cl_rrmethods2call[157] [[ -n '' ]] :cl_rrmethods2call[162] [[ -n '' ]] :cl_rrmethods2call[167] [[ -n '' ]] :cl_rrmethods2call[172] [[ -n '' ]] :cl_rrmethods2call[182] [[ -z '' ]] :cl_rrmethods2call[184] typeset sysmgdata :cl_rrmethods2call[185] typeset reposmgdata :cl_rrmethods2call[186] [[ -x /usr/es/sbin/cluster/xd_generic/xd_cli/clxd_list_mg_smit ]] :cl_rrmethods2call[191] [[ -n '' ]] :cl_rrmethods2call[191] [[ -n '' ]] :cl_rrmethods2call[197] echo '' :cl_rrmethods2call[199] return 0 :node_down_complete[237] METHODS='' :node_down_complete[251] : If dependencies are configured and node is being forced down then :node_down_complete[252] : no need to do varyoff for any passive mode VGs :node_down_complete[254] [[ TRUE == TRUE ]] :node_down_complete[257] : If any volume groups were varied on in passive mode when this node :node_down_complete[258] : came up, all the prior resource group processing would have left them :node_down_complete[259] : in passive mode. Completely vary them off at this point. 
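The varyoff sequence that follows is best read as a unit: the fence height must be lifted to read/write before LVM can update the VGDA, and is dropped back to read/only once the volume group is offline and the ODM timestamps have been pushed out. Condensed from the trace below, with datavg standing in for any HACMP-managed volume group:

VG=datavg
if lsvg -L "$VG" 2>/dev/null | grep -i -q passive-only; then
    cl_set_vg_fence_height -c "$VG" rw     # allow VGDA writes during varyoff
    varyoffvg "$VG"
    cl_update_vg_odm_ts -o -f "$VG"        # sync volume group ODM timestamps cluster-wide
    cl_set_vg_fence_height -c "$VG" ro     # restore the read-only fence
fi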
:node_down_complete[261] lsvg -L :node_down_complete[261] lsvg -L -o :node_down_complete[261] paste -s '-d|' - :node_down_complete[261] grep -w -v -x -E 'caavg_private|rootvg' :node_down_complete[261] INACTIVE_VGS=$'datavg\naltinst_rootvg' :node_down_complete[264] lsvg -L datavg :node_down_complete[264] 2> /dev/null :node_down_complete[264] grep -i -q passive-only :node_down_complete[267] : Reset any read only fence height prior to vary off :node_down_complete[269] cl_set_vg_fence_height -c datavg rw cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0)) :node_down_complete[270] RC=0 :node_down_complete[271] (( 0 != 0 )) :node_down_complete[282] : 'lsvg ' will show if a volume group is varied :node_down_complete[283] : on in passive mode. Any such are varied off :node_down_complete[285] cltime 2023-09-28T15:56:34.875758 :node_down_complete[286] varyoffvg datavg :node_down_complete[287] RC=0 :node_down_complete[288] cltime 2023-09-28T15:56:35.002441 :node_down_complete[289] : rc_varyoffvg = 0 :node_down_complete[291] : Force a timestamp update to get timestamps in sync :node_down_complete[292] : since timing may prevent LVM from doing so :node_down_complete[294] cl_update_vg_odm_ts -o -f datavg :cl_update_vg_odm_ts(0.000)[77] version=1.13 :cl_update_vg_odm_ts(0.000)[121] o_flag='' :cl_update_vg_odm_ts(0.000)[122] f_flag='' :cl_update_vg_odm_ts(0.000)[123] getopts :of option :cl_update_vg_odm_ts(0.000)[126] : Local timestamps should be good, since volume group was :cl_update_vg_odm_ts(0.000)[127] : just varyied on or off :cl_update_vg_odm_ts(0.001)[128] o_flag=TRUE :cl_update_vg_odm_ts(0.001)[123] getopts :of option :cl_update_vg_odm_ts(0.001)[131] : Update timestamps clusterwide, even if LVM support is in :cl_update_vg_odm_ts(0.001)[132] : place :cl_update_vg_odm_ts(0.001)[133] f_flag=TRUE :cl_update_vg_odm_ts(0.001)[123] getopts :of option :cl_update_vg_odm_ts(0.001)[142] shift 2 :cl_update_vg_odm_ts(0.001)[144] vg_name=datavg :cl_update_vg_odm_ts(0.001)[145] [[ -z datavg ]] :cl_update_vg_odm_ts(0.001)[151] shift :cl_update_vg_odm_ts(0.001)[152] node_list='' :cl_update_vg_odm_ts(0.001)[153] /usr/es/sbin/cluster/utilities/cl_get_path all :cl_update_vg_odm_ts(0.004)[153] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin :cl_update_vg_odm_ts(0.004)[155] [[ -z TRUE ]] :cl_update_vg_odm_ts(0.004)[214] found_new_ts='' :cl_update_vg_odm_ts(0.004)[217] : Try to update the volume group ODM time stamp on every other node :cl_update_vg_odm_ts(0.004)[218] : in the resource group that owns datavg :cl_update_vg_odm_ts(0.004)[220] [[ -z '' ]] :cl_update_vg_odm_ts(0.004)[223] : We were not given a node list. 
The node list is derived from :cl_update_vg_odm_ts(0.004)[224] : the resource group that the volume group is in. :cl_update_vg_odm_ts(0.004)[226] /usr/es/sbin/cluster/utilities/clodmget -q 'name like *VOLUME_GROUP and value = datavg' -f group -n HACMPresource :cl_update_vg_odm_ts(0.006)[226] group_name=epprd_rg :cl_update_vg_odm_ts(0.006)[227] [[ -n epprd_rg ]] :cl_update_vg_odm_ts(0.006)[230] : Find all other cluster nodes in the resource group that owns :cl_update_vg_odm_ts(0.006)[231] : the volume group datavg :cl_update_vg_odm_ts(0.006)[233] /usr/es/sbin/cluster/utilities/clodmget -q 'group = epprd_rg' -f nodes -n HACMPgroup :cl_update_vg_odm_ts(0.009)[233] node_list='epprda epprds' :cl_update_vg_odm_ts(0.009)[238] : Check to see if the volume group is known locally :cl_update_vg_odm_ts(0.009)[240] odmget -q 'name = datavg and PdDvLn = logical_volume/vgsubclass/vgtype' CuDv :cl_update_vg_odm_ts(0.011)[240] [[ -z $'\nCuDv:\n\tname = "datavg"\n\tstatus = 1\n\tchgstatus = 1\n\tddins = ""\n\tlocation = ""\n\tparent = ""\n\tconnwhere = ""\n\tPdDvLn = "logical_volume/vgsubclass/vgtype"' ]] :cl_update_vg_odm_ts(0.011)[272] : Get the vgid for volume group datavg :cl_update_vg_odm_ts(0.011)[274] getlvodm -v datavg :cl_update_vg_odm_ts(0.014)[274] vgid=00c44af100004b00000001851e9dc053 :cl_update_vg_odm_ts(0.014)[280] : Get the volume group timestamp for datavg :cl_update_vg_odm_ts(0.014)[281] : as currently saved in ODM :cl_update_vg_odm_ts(0.014)[283] getlvodm -T 00c44af100004b00000001851e9dc053 :cl_update_vg_odm_ts(0.016)[283] current_odm_ts=651523922bbb5897 :cl_update_vg_odm_ts(0.016)[288] [[ TRUE != TRUE ]] :cl_update_vg_odm_ts(0.016)[346] : Is an update 'necessary?' :cl_update_vg_odm_ts(0.016)[348] [[ -n 'epprda epprds' ]] :cl_update_vg_odm_ts(0.016)[350] LOCALNODENAME=epprda :cl_update_vg_odm_ts(0.016)[351] LOCALNODENAME=epprda :cl_update_vg_odm_ts(0.016)[352] [[ -n epprda ]] :cl_update_vg_odm_ts(0.016)[355] : Skip the local node, since we have done that above. :cl_update_vg_odm_ts(0.018)[357] print 'epprda epprds' :cl_update_vg_odm_ts(0.019)[357] tr ' ' '\n' :cl_update_vg_odm_ts(0.021)[357] tr , '\n' :cl_update_vg_odm_ts(0.023)[357] grep -v -w -x epprda :cl_update_vg_odm_ts(0.024)[357] paste -s -d, - :cl_update_vg_odm_ts(0.026)[357] node_list=epprds :cl_update_vg_odm_ts(0.026)[365] : Update the time stamp on all those other nodes on which the :cl_update_vg_odm_ts(0.026)[366] : volume group is currently varied off. LVM will take care of :cl_update_vg_odm_ts(0.026)[367] : the others. 
:cl_update_vg_odm_ts(0.026)[369] [[ -n epprds ]] :cl_update_vg_odm_ts(0.026)[371] cl_on_node -cspoc '-f -n epprds' 'lsvg -o | grep -qx datavg || /usr/sbin/putlvodm -T 651523922bbb5897 00c44af100004b00000001851e9dc053 && /usr/sbin/savebase > /dev/null' :cl_update_vg_odm_ts(0.027)[371] _CSPOC_CALLED_FROM_SMIT=true clhaver[576]: version 1.14 clhaver[591]: colon delimied output clhaver[612]: MINVER=6100 clhaver[624]: thread(epprds) clhaver[144]: cl_gethostbynode epprds cl_gethostbynode[102]: version 1.1 i_flag=0 given name is epprds cl_gethostbynode[127]: cl_query nodes=2 cl_gethostbynode[161]: epprds is a PowerHA node name cl_gethostbynode[313]: epprds is the CAA host matching PowerHA node epprds clhaver[157]: node epprds resolves to epprds clhaver[166]: cl_socket(COLLVER epprds epprds) clhaver[191]: cl_connect(epprds) clhaver[230]: read(epprds) epprds: :cl_rsh[99] version=1.4 epprds: :cl_rsh[102] CAA_node_name='' epprds: :cl_rsh[105] : Process optional flags epprds: :cl_rsh[107] cmd_flag=-n epprds: :cl_rsh[108] [[ -n == -n ]] epprds: :cl_rsh[111] : Remove the no standard input flag epprds: :cl_rsh[113] shift epprds: :cl_rsh[124] : Pick up and check the input epprds: :cl_rsh[126] read destination command epprds: :cl_rsh[126] print 'epprds /usr/es/sbin/cluster/cspoc/cexec eval gmhdhgghcacngpcahmcaghhcgfhacacnhbhicagegbhegbhgghcahmhmcacphfhdhccphdgcgjgocphahfhegmhggpgegncacnfecadgdfdbdfdcdddjdcdcgcgcgcdfdidjdhcadadagddedegbggdbdadadadadegcdadadadadadadadbdidfdbgfdjgegddadfddcacgcgcacphfhdhccphdgcgjgocphdgbhggfgcgbhdgfcadocacpgegfhgcpgohfgmgm' epprds: :cl_rsh[127] [[ -z epprds ]] epprds: :cl_rsh[127] [[ -z '/usr/es/sbin/cluster/cspoc/cexec eval gmhdhgghcacngpcahmcaghhcgfhacacnhbhicagegbhegbhgghcahmhmcacphfhdhccphdgcgjgocphahfhegmhggpgegncacnfecadgdfdbdfdcdddjdcdcgcgcgcdfdidjdhcadadagddedegbggdbdadadadadegcdadadadadadadadbdidfdbgfdjgegddadfddcacgcgcacphfhdhccphdgcgjgocphdgbhggfgcgbhdgfcadocacpgegfhgcpgohfgmgm' ]] epprds: :cl_rsh[136] /usr/es/sbin/cluster/utilities/cl_nn2hn epprds epprds: :cl_nn2hn[83] version=1.11 epprds: :cl_nn2hn[86] CAA_host_name='' epprds: :cl_nn2hn[86] typeset CAA_host_name epprds: :cl_nn2hn[87] node_name='' epprds: :cl_nn2hn[87] typeset node_name epprds: :cl_nn2hn[88] node_interfaces='' epprds: :cl_nn2hn[88] typeset node_interfaces epprds: :cl_nn2hn[89] COMM_PATH='' epprds: :cl_nn2hn[89] typeset COMM_PATH epprds: :cl_nn2hn[90] r_flag='' epprds: :cl_nn2hn[90] typeset r_flag epprds: :cl_nn2hn[93] : Pick up and check the input epprds: :cl_nn2hn[95] getopts r option epprds: :cl_nn2hn[106] : Pick up the destination, which follows the options epprds: :cl_nn2hn[108] shift 0 epprds: :cl_nn2hn[109] destination=epprds epprds: :cl_nn2hn[109] typeset destination epprds: :cl_nn2hn[111] [[ -z epprds ]] epprds: :cl_nn2hn[121] : In order to prevent recursion, first you must prevent recursion... epprds: :cl_nn2hn[123] [[ '' != TRUE ]] epprds: :cl_nn2hn[126] : This routine is not being called from cl_query_hn_id, so call it epprds: :cl_nn2hn[127] : to see if it can find the CAA host name based on a common short epprds: :cl_nn2hn[128] : id, or match on CAA host name, or match on CAA short name, or epprds: :cl_nn2hn[129] : similar match in /etc/cluster/rhosts. 
epprds: :cl_nn2hn[131] cl_query_hn_id -q -i epprds epprds: cl_query_hn_id[137]: version 1.2 epprds: cl_gethostbynode[102]: version 1.1 i_flag=105 given name is epprds epprds: cl_gethostbynode[127]: cl_query nodes=2 epprds: cl_gethostbynode[161]: epprds is a PowerHA node name epprds: cl_gethostbynode[313]: epprds is the CAA host matching PowerHA node epprds epprds: :cl_nn2hn[131] CAA_host_name=epprds epprds: :cl_nn2hn[132] RC=0 epprds: :cl_nn2hn[133] (( 0 == 0 )) epprds: :cl_nn2hn[136] : The straight forward tests worked! epprds: :cl_nn2hn[138] [[ epprds == @(+([0-9.])|+([0-9:])) ]] epprds: :cl_nn2hn[159] [[ -z epprds ]] epprds: :cl_nn2hn[340] [[ -z epprds ]] epprds: :cl_nn2hn[345] [[ -n epprds ]] epprds: :cl_nn2hn[348] : We have found epprds is our best guess at a CAA host name epprds: :cl_nn2hn[349] : corresponding to epprds epprds: :cl_nn2hn[351] print epprds epprds: :cl_nn2hn[352] return 0 epprds: :cl_rsh[136] CAA_node_name=epprds epprds: :cl_rsh[148] : Invoke clcomd epprds: :cl_rsh[150] /usr/sbin/clrsh epprds -n '/usr/es/sbin/cluster/cspoc/cexec eval gmhdhgghcacngpcahmcaghhcgfhacacnhbhicagegbhegbhgghcahmhmcacphfhdhccphdgcgjgocphahfhegmhggpgegncacnfecadgdfdbdfdcdddjdcdcgcgcgcdfdidjdhcadadagddedegbggdbdadadadadegcdadadadadadadadbdidfdbgfdjgegddadfddcacgcgcacphfhdhccphdgcgjgocphdgbhggfgcgbhdgfcadocacpgegfhgcpgohfgmgm' epprds: :cl_rsh[151] return 0 :cl_update_vg_odm_ts(0.500)[375] return 0 :node_down_complete[297] : If VG fencing is in place, restore the fence height to read/only. :node_down_complete[299] cl_set_vg_fence_height -c datavg ro cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=ro(2)) :node_down_complete[300] RC=0 :node_down_complete[301] : return code from volume group fencing is 0 :node_down_complete[302] (( 0 != 0 )) :node_down_complete[264] lsvg -L altinst_rootvg :node_down_complete[264] 2> /dev/null :node_down_complete[264] grep -i -q passive-only :node_down_complete[315] : remove the flag file used to indicate reconfig_resources :node_down_complete[317] rm -f /usr/es/sbin/cluster/etc/.hacmp_wlm_config_changed :node_down_complete[320] : Run WLM stop script :node_down_complete[322] cl_wlm_stop :cl_wlm_stop[+55] version=%I% :cl_wlm_stop[+59] :cl_wlm_stop[+59] clwlmruntime -l :cl_wlm_stop[+59] awk BEGIN { FS = ":" } $1 !~ /^#.*/ { print $1 } HA_WLM_CONFIG=HA_WLM_config :cl_wlm_stop[+60] [[ -z HA_WLM_config ]] :cl_wlm_stop[+69] wlmcntrl -q WLM is stopped :cl_wlm_stop[+70] WLM_IS_RUNNING=1 :cl_wlm_stop[+72] WLM_CONFIG_FILES=classes shares limits rules :cl_wlm_stop[+74] PREV_WLM_CONFIG= :cl_wlm_stop[+76] HA_STARTED_WLM=false :cl_wlm_stop[+78] [[ -e /etc/wlm/HA_WLM_config/HA_prev_config_subdir ]] :cl_wlm_stop[+86] [[ -e /etc/wlm/HA_WLM_config/classes.prev ]] :cl_wlm_stop[+86] [[ -e /etc/wlm/HA_WLM_config/shares.prev ]] :cl_wlm_stop[+86] [[ -e /etc/wlm/HA_WLM_config/limits.prev ]] :cl_wlm_stop[+86] [[ -e /etc/wlm/HA_WLM_config/rules.prev ]] :cl_wlm_stop[+107] [[ -n ]] :cl_wlm_stop[+107] [[ true = false ]] :cl_wlm_stop[+144] exit 0 :node_down_complete[330] [[ epprda == epprda ]] :node_down_complete[333] : Node is down: Create the lock file that inhibits node halt 
:node_down_complete[335] /bin/touch /usr/es/sbin/cluster/etc/ha_nodehalt.lock :node_down_complete[339] : If this is the last node to leave, restore read write access to all volume groups :node_down_complete[341] [[ '' != forced ]] :node_down_complete[343] [[ -z epprds ]] :node_down_complete[392] [[ epprda == epprda ]] :node_down_complete[395] : Node is gracefully going down. :node_down_complete[397] clodmget -n -q policy=scsi -f value HACMPsplitmerge :node_down_complete[397] SCSIPR_ENABLED='' :node_down_complete[397] typeset SCSIPR_ENABLED :node_down_complete[398] [[ '' == Yes ]] :node_down_complete[452] : refresh clcomd, FWIW :node_down_complete[454] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. :node_down_complete[459] : This is the final info of all RGs: :node_down_complete[461] clRGinfo -p -t :node_down_complete[461] 2>& 1 clRGinfo[431]: version I clRGinfo[517]: Number of resource groups = 0 clRGinfo[562]: cluster epprda_cluster is version = 22 clRGinfo[597]: no resource groups specified on command line - print all clRGinfo[685]: Current group is 'epprd_rg' get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_table get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_table Cluster Name: epprda_cluster Resource Group Name: epprd_rg Node Group State Delayed Timers ---------------------------------------------------------------- --------------- ------------------- epprda OFFLINE epprds OFFLINE :node_down_complete[463] return 0 Sep 28 2023 15:56:35 EVENT COMPLETED: node_down_complete epprda 0 |2023-09-28T15:56:35|8609|EVENT COMPLETED: node_down_complete epprda 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-28T15:56:35.652530 + echo '|2023-09-28T15:56:35.652530|INFO: node_down_complete|epprda|0' + 1>> /var/hacmp/availability/clavailability.log clexit.rc : Normal termination of clstrmgrES. Restart now. 0513-059 The clstrmgrES Subsystem has been started. Subsystem PID is 29622606. [ROHALOG:15270362] Automatic Release of Resource: Start :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. 
:get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :clmanageroha[318] : version='@(#)' 5881272 43haes/usr/sbin/cluster/events/clmanageroha.sh, 61aha_r726, 2205A_aha726, May 16 2022 12:15 PM :clmanageroha[321] clodmget -n -f connection_type HACMPhmcparam :clmanageroha[321] CONN_TYPE=0 :clmanageroha[321] typeset -i CONN_TYPE :clmanageroha[323] clodmget -q name='epprda and object like POWERVS_*' -nf name HACMPnode :clmanageroha[323] 2> /dev/null :clmanageroha[323] [[ -n '' ]] :clmanageroha[326] export CONN_TYPE :clmanageroha[331] roha_session_open -o release -s -t :clmanageroha[roha_session_open:131] roha_session.id=16122174 :clmanageroha[roha_session_open:132] date :clmanageroha[roha_session_open:132] LC_ALL=C :clmanageroha[roha_session_open:132] roha_session_log 'Open session 16122174 at Sat Sep 30 01:26:58 KORST 2023' [ROHALOG:16122174:(0.134)] Open session 16122174 at Sat Sep 30 01:26:58 KORST 2023 :clmanageroha[roha_session_open:137] getopts :cso:l:t opt :clmanageroha[roha_session_open:146] roha_session.operation=release :clmanageroha[roha_session_open:137] getopts :cso:l:t opt :clmanageroha[roha_session_open:143] roha_session.systemmirror_mode=1 :clmanageroha[roha_session_open:137] getopts :cso:l:t opt :clmanageroha[roha_session_open:152] online_rgs_skip=1 :clmanageroha[roha_session_open:137] getopts :cso:l:t opt :clmanageroha[roha_session_open:163] [[ release != @(acquire|release|adjust) ]] :clmanageroha[roha_session_open:168] no_roha_apps=0 :clmanageroha[roha_session_open:168] typeset -i no_roha_apps :clmanageroha[roha_session_open:169] need_explicit_res_rel=0 :clmanageroha[roha_session_open:169] typeset -i need_explicit_res_rel :clmanageroha[roha_session_open:187] [[ -n '' ]] :clmanageroha[roha_session_open:188] [[ -z '' ]] :clmanageroha[roha_session_open:188] clmgr q roha :clmanageroha[roha_session_open:188] [[ -z '' ]] :clmanageroha[roha_session_open:189] roha_session_log 'INFO: No ROHA configured on applications.\n' [ROHALOG:16122174:(0.775)] INFO: No ROHA configured on applications. 
[ROHALOG:16122174:(0.775)] :clmanageroha[roha_session_open:190] no_roha_apps=1 :clmanageroha[roha_session_open:195] read_tunables :clmanageroha[roha_session_open:196] echo '' :clmanageroha[roha_session_open:196] grep -q epprda :clmanageroha[roha_session_open:197] (( 1 == 0 )) :clmanageroha[roha_session_open:202] (( 1 == 1 )) :clmanageroha[roha_session_open:203] roha_session_read_odm_dynresop DLPAR_MEM :clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_MEM -nf value HACMPdynresop :clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos :clmanageroha[roha_session_read_odm_dynresop:816] out='' :clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 :clmanageroha[roha_session_open:203] (( 0 == 0.00 )) :clmanageroha[roha_session_open:204] roha_session_read_odm_dynresop DLPAR_PROCS :clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROCS -nf value HACMPdynresop :clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos :clmanageroha[roha_session_read_odm_dynresop:816] out='' :clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 :clmanageroha[roha_session_open:204] (( 0 == 0 )) :clmanageroha[roha_session_open:205] roha_session_read_odm_dynresop DLPAR_PROC_UNITS :clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROC_UNITS -nf value HACMPdynresop :clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos :clmanageroha[roha_session_read_odm_dynresop:816] out='' :clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 :clmanageroha[roha_session_open:205] (( 0 == 0.00 )) :clmanageroha[roha_session_open:206] roha_session_log 'INFO: Nothing to be done.\n' [ROHALOG:16122174:(0.835)] INFO: Nothing to be done. [ROHALOG:16122174:(0.835)] :clmanageroha[roha_session_open:207] exit 0 [ROHALOG:15270362] Automatic Release of Resource: End rc.init: Removed /usr/es/sbin/cluster/.cthags.exit file. [ROHALOG:14811608] Automatic Release of Resource: Start :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. 
:get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :clmanageroha[318] : version='@(#)' 5881272 43haes/usr/sbin/cluster/events/clmanageroha.sh, 61aha_r726, 2205A_aha726, May 16 2022 12:15 PM :clmanageroha[321] clodmget -n -f connection_type HACMPhmcparam :clmanageroha[321] CONN_TYPE=0 :clmanageroha[321] typeset -i CONN_TYPE :clmanageroha[323] clodmget -q name='epprda and object like POWERVS_*' -nf name HACMPnode :clmanageroha[323] 2> /dev/null :clmanageroha[323] [[ -n '' ]] :clmanageroha[326] export CONN_TYPE :clmanageroha[331] roha_session_open -o release -s -t :clmanageroha[roha_session_open:131] roha_session.id=14745866 :clmanageroha[roha_session_open:132] date :clmanageroha[roha_session_open:132] LC_ALL=C :clmanageroha[roha_session_open:132] roha_session_log 'Open session 14745866 at Sat Sep 30 03:09:14 KORST 2023' [ROHALOG:14745866:(0.136)] Open session 14745866 at Sat Sep 30 03:09:14 KORST 2023 :clmanageroha[roha_session_open:137] getopts :cso:l:t opt :clmanageroha[roha_session_open:146] roha_session.operation=release :clmanageroha[roha_session_open:137] getopts :cso:l:t opt :clmanageroha[roha_session_open:143] roha_session.systemmirror_mode=1 :clmanageroha[roha_session_open:137] getopts :cso:l:t opt :clmanageroha[roha_session_open:152] online_rgs_skip=1 :clmanageroha[roha_session_open:137] getopts :cso:l:t opt :clmanageroha[roha_session_open:163] [[ release != @(acquire|release|adjust) ]] :clmanageroha[roha_session_open:168] no_roha_apps=0 :clmanageroha[roha_session_open:168] typeset -i no_roha_apps :clmanageroha[roha_session_open:169] need_explicit_res_rel=0 :clmanageroha[roha_session_open:169] typeset -i need_explicit_res_rel :clmanageroha[roha_session_open:187] [[ -n '' ]] :clmanageroha[roha_session_open:188] [[ -z '' ]] :clmanageroha[roha_session_open:188] clmgr q roha :clmanageroha[roha_session_open:188] [[ -z '' ]] :clmanageroha[roha_session_open:189] roha_session_log 'INFO: No ROHA configured on applications.\n' [ROHALOG:14745866:(0.788)] INFO: No ROHA configured on applications. 
[ROHALOG:14745866:(0.789)] :clmanageroha[roha_session_open:190] no_roha_apps=1 :clmanageroha[roha_session_open:195] read_tunables :clmanageroha[roha_session_open:196] echo '' :clmanageroha[roha_session_open:196] grep -q epprda :clmanageroha[roha_session_open:197] (( 1 == 0 )) :clmanageroha[roha_session_open:202] (( 1 == 1 )) :clmanageroha[roha_session_open:203] roha_session_read_odm_dynresop DLPAR_MEM :clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_MEM -nf value HACMPdynresop :clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos :clmanageroha[roha_session_read_odm_dynresop:816] out='' :clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 :clmanageroha[roha_session_open:203] (( 0 == 0.00 )) :clmanageroha[roha_session_open:204] roha_session_read_odm_dynresop DLPAR_PROCS :clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROCS -nf value HACMPdynresop :clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos :clmanageroha[roha_session_read_odm_dynresop:816] out='' :clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 :clmanageroha[roha_session_open:204] (( 0 == 0 )) :clmanageroha[roha_session_open:205] roha_session_read_odm_dynresop DLPAR_PROC_UNITS :clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROC_UNITS -nf value HACMPdynresop :clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos :clmanageroha[roha_session_read_odm_dynresop:816] out='' :clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 :clmanageroha[roha_session_open:205] (( 0 == 0.00 )) :clmanageroha[roha_session_open:206] roha_session_log 'INFO: Nothing to be done.\n' [ROHALOG:14745866:(0.852)] INFO: Nothing to be done. [ROHALOG:14745866:(0.852)] :clmanageroha[roha_session_open:207] exit 0 [ROHALOG:14811608] Automatic Release of Resource: End rc.init: Removed /usr/es/sbin/cluster/.cthags.exit file. Sep 30 2023 03:25:02 EVENT START: admin_op clrm_start_request 18768 0 |2023-09-30T03:25:02|18768|EVENT START: admin_op clrm_start_request 18768 0| :admin_op[110] trap sigint_handler INT :admin_op[116] OP_TYPE=clrm_start_request :admin_op[116] typeset OP_TYPE :admin_op[117] SERIAL=18768 :admin_op[117] typeset -li SERIAL :admin_op[118] INVALID=0 :admin_op[118] typeset -li INVALID The administrator initiated the following action at Sat Sep 30 03:25:02 KORST 2023 Check smit.log and clutils.log for additional details. Starting PowerHA cluster services on node: epprda in normal mode... Sep 30 2023 03:25:05 EVENT COMPLETED: admin_op clrm_start_request 18768 0 0 |2023-09-30T03:25:05|18768|EVENT COMPLETED: admin_op clrm_start_request 18768 0 0| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 18769 Cluster services started on node 'epprda' Enqueued rg_move acquire event for resource group epprd_rg. Node Up Completion Event has been enqueued. 
---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_JOIN_NODE_DEP|2023-09-30T03:25:07|18769| |CLUSTER_RG_MOVE_ACQUIRE|epprd_rg| |NODE_UP_COMPLETE| |EVENT_PREAMBLE_END| Sep 30 2023 03:25:09 EVENT START: node_up epprda |2023-09-30T03:25:09|18769|EVENT START: node_up epprda| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-30T03:25:09.911761 + echo '|2023-09-30T03:25:09.911761|INFO: node_up|epprda' + 1>> /var/hacmp/availability/clavailability.log :node_up[182] version=%I% :node_up[185] NODENAME=epprda :node_up[185] export NODENAME :node_up[193] STATUS=0 :node_up[193] typeset -li STATUS :node_up[194] RC=0 :node_up[194] typeset -li RC :node_up[195] ENABLE_NFS_CROSS_MOUNT=false :node_up[196] START_MODE='' :node_up[196] typeset START_MODE :node_up[198] set -u :node_up[200] (( 1 < 1 )) :node_up[200] (( 1 > 2 )) :node_up[207] : serial number for this event is 18769 :node_up[210] [[ epprda == epprda ]] :node_up[213] : Remove the node halt lock file. :node_up[214] : Hereafter, clstrmgr failure leads to node halt :node_up[216] rm -f /usr/es/sbin/cluster/etc/ha_nodehalt.lock :node_up[219] (( 1 > 1 )) :node_up[256] : If RG_DEPENDENCIES=false, process RGs with clsetenvgrp :node_up[258] [[ TRUE == FALSE ]] :node_up[281] : localnode processing prior to RG acquisition :node_up[283] [[ epprda == epprda ]] :node_up[283] [[ '' != forced ]] :node_up[286] : Reserve Volume Groups using SCSIPR :node_up[288] clodmget -n -q policy=scsi -f value HACMPsplitmerge :node_up[288] SCSIPR_ENABLED='' :node_up[288] typeset SCSIPR_ENABLED :node_up[289] [[ '' == Yes ]] :node_up[334] : Setup VG fencing. This must be done prior to any potential disk access. :node_up[336] node_up_vg_fence_init :node_up[node_up_vg_fence_init:73] typeset VGs_on_line :node_up[node_up_vg_fence_init:74] typeset VG_name :node_up[node_up_vg_fence_init:75] typeset VG_ID :node_up[node_up_vg_fence_init:76] typeset VG_PV_list :node_up[node_up_vg_fence_init:79] : Find out what volume groups are currently on-line :node_up[node_up_vg_fence_init:81] lsvg -L -o :node_up[node_up_vg_fence_init:81] 2> /var/hacmp/log/node_up.lsvg.err :node_up[node_up_vg_fence_init:81] print caavg_private rootvg :node_up[node_up_vg_fence_init:81] VGs_on_line='caavg_private rootvg' :node_up[node_up_vg_fence_init:82] [[ -e /var/hacmp/log/node_up.lsvg.err ]] :node_up[node_up_vg_fence_init:82] [[ ! -s /var/hacmp/log/node_up.lsvg.err ]] :node_up[node_up_vg_fence_init:82] rm /var/hacmp/log/node_up.lsvg.err :node_up[node_up_vg_fence_init:85] : Clean up any old fence group files and stale fence groups. 
:node_up[node_up_vg_fence_init:86] : These are all of the form '/usr/es/sbin/cluster/etc/vg/<vg>.uuid' :node_up[node_up_vg_fence_init:88] valid_vg_lst='' :node_up[node_up_vg_fence_init:89] lsvg -L :node_up[node_up_vg_fence_init:89] egrep -vw 'rootvg|caavg_private' :node_up[node_up_vg_fence_init:89] 2>> /var/hacmp/log/node_up.lsvg.err :node_up:datavg[node_up_vg_fence_init:91] PS4_LOOP=datavg :node_up:datavg[node_up_vg_fence_init:92] clodmget -q $'name like \'*VOLUME_GROUP\' and value = datavg' -f value -n HACMPresource :node_up:datavg[node_up_vg_fence_init:92] [[ -z datavg ]] :node_up:datavg[node_up_vg_fence_init:109] : Volume group datavg is an HACMP resource :node_up:datavg[node_up_vg_fence_init:111] [[ 'caavg_private rootvg' == ?(*\ )datavg?(\ *) ]] :node_up:datavg[node_up_vg_fence_init:115] fence_height=ro :node_up:datavg[node_up_vg_fence_init:119] : Recreate the fence group to match current volume group membership :node_up:datavg[node_up_vg_fence_init:121] cl_vg_fence_redo -c datavg ro :cl_vg_fence_redo[52] version=1.3 :cl_vg_fence_redo[55] RC=0 :cl_vg_fence_redo[55] typeset -li RC :cl_vg_fence_redo[58] : Check for optional -c parameter :cl_vg_fence_redo[60] [[ -c == -c ]] :cl_vg_fence_redo[62] c_flag=-c :cl_vg_fence_redo[63] shift :cl_vg_fence_redo[66] VG=datavg :cl_vg_fence_redo[67] UUID_file=/usr/es/sbin/cluster/etc/vg/datavg.uuid :cl_vg_fence_redo[68] fence_height=ro :cl_vg_fence_redo[70] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.uuid ]] :cl_vg_fence_redo[83] [[ -z ro ]] :cl_vg_fence_redo[98] : Rebuild the fence group for datavg :cl_vg_fence_redo[99] : First, find the disks in the volume group :cl_vg_fence_redo[101] /usr/sbin/getlvodm -v datavg :cl_vg_fence_redo[101] VGID=00c44af100004b00000001851e9dc053 :cl_vg_fence_redo[103] [[ -n 00c44af100004b00000001851e9dc053 ]] :cl_vg_fence_redo[106] : Create a fence group for datavg :cl_vg_fence_redo[108] /usr/sbin/getlvodm -w 00c44af100004b00000001851e9dc053 :cl_vg_fence_redo[108] cut -f2 '-d ' :cl_vg_fence_redo[108] PV_disk_list=$'hdisk2\nhdisk3\nhdisk4\nhdisk5\nhdisk6\nhdisk7\nhdisk8' :cl_vg_fence_redo[109] cl_vg_fence_init -c datavg ro hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8 cl_vg_fence_init[145]: version @(#) 7d4c34b 43haes/usr/sbin/cluster/events/utils/cl_vg_fence_init.c, 726, 2147A_aha726, Feb 05 2021 09:50 PM cl_vg_fence_init[204]: odm_initialize() cl_vg_fence_init[231]: calloc(7, 64) cl_vg_fence_init[259]: getattr(hdisk2, PCM) = PCM/friend/fcpother cl_vg_fence_init[259]: getattr(hdisk3, PCM) = PCM/friend/fcpother cl_vg_fence_init[259]: getattr(hdisk4, PCM) = PCM/friend/fcpother cl_vg_fence_init[259]: getattr(hdisk5, PCM) = PCM/friend/fcpother cl_vg_fence_init[259]: getattr(hdisk6, PCM) = PCM/friend/fcpother cl_vg_fence_init[259]: getattr(hdisk7, PCM) = PCM/friend/fcpother cl_vg_fence_init[259]: getattr(hdisk8, PCM) = PCM/friend/fcpother cl_vg_fence_init[294]: sfwAddFenceGroup(datavg, 7, hdisk2, hdisk3, hdisk4, hdisk5, hdisk6, hdisk7, hdisk8) cl_vg_fence_init[374]: free(200101b8) cl_vg_fence_init[400]: creat(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_vg_fence_init[408]: write(/usr/es/sbin/cluster/etc/vg/datavg.uuid, 16) cl_vg_fence_init[442]: sfwSetFenceGroup(vg=datavg, height=ro(2) uuid=ec2db4422261eae02091227fb9e53c88) :cl_vg_fence_redo[110] RC=0 :cl_vg_fence_redo[111] : Exit status is 0 from cl_vg_fence_init datavg ro hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8 :cl_vg_fence_redo[113] (( 0 != 0 )) :cl_vg_fence_redo[123] return 0 :node_up:datavg[node_up_vg_fence_init:122] valid_vg_lst=' datavg'
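The rebuild just traced reduces to three steps: map the volume group name to its VGID, expand the VGID to its member disks, and hand that list to cl_vg_fence_init at the desired fence height. As a sketch, using the same commands and the datavg values from this log:

VG=datavg
VGID=$(/usr/sbin/getlvodm -v "$VG")                   # name -> VGID (00c44af100004b00000001851e9dc053 here)
PVS=$(/usr/sbin/getlvodm -w "$VGID" | cut -f2 -d' ')  # VGID -> member hdisks
cl_vg_fence_init -c "$VG" ro $PVS                     # recreate the fence group at height ro; $PVS word-splits intentionally

cl_vg_fence_init also rewrites /usr/es/sbin/cluster/etc/vg/datavg.uuid, the file node_up_vg_fence_init uses afterwards to recognize stale fence groups such as altinst_rootvg's.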
:node_up:altinst_rootvg[node_up_vg_fence_init:91] PS4_LOOP=altinst_rootvg :node_up:altinst_rootvg[node_up_vg_fence_init:92] clodmget -q $'name like \'*VOLUME_GROUP\' and value = altinst_rootvg' -f value -n HACMPresource :node_up:altinst_rootvg[node_up_vg_fence_init:92] [[ -z '' ]] :node_up:altinst_rootvg[node_up_vg_fence_init:94] [[ -s /usr/es/sbin/cluster/etc/vg/altinst_rootvg.uuid ]] :node_up:altinst_rootvg[node_up_vg_fence_init:125] [[ -e /var/hacmp/log/node_up.lsvg.err ]] :node_up:altinst_rootvg[node_up_vg_fence_init:125] [[ ! -s /var/hacmp/log/node_up.lsvg.err ]] :node_up:altinst_rootvg[node_up_vg_fence_init:125] rm /var/hacmp/log/node_up.lsvg.err :node_up:altinst_rootvg[node_up_vg_fence_init:128] : Any remaining old fence group files are from stale fence groups, :node_up:altinst_rootvg[node_up_vg_fence_init:129] : so remove them :node_up:altinst_rootvg[node_up_vg_fence_init:131] [[ -s /usr/es/sbin/cluster/etc/vg/altinst_rootvg.uuid ]] :node_up:altinst_rootvg[node_up_vg_fence_init:158] unset PS4_LOOP :node_up[node_up_vg_fence_init:160] return 0 :node_up[344] : If WLM manager clases have been configured for an application server, process them now :node_up[346] clodmget -q $'name like \'WLM_*\'' -f id HACMPresource :node_up[346] [[ -n '' ]] :node_up[371] : Call ss-load replicated resource methods if they are defined :node_up[373] cl_rrmethods2call ss_load :cl_rrmethods2call[56] version=%I% :cl_rrmethods2call[84] RRMETHODS='' :cl_rrmethods2call[85] NEED_RR_ENV_VARS=no :cl_rrmethods2call[104] : The load and unload methods if defined are returned on the :cl_rrmethods2call[105] : local node :cl_rrmethods2call[107] [[ epprda == epprda ]] :cl_rrmethods2call[109] NEED_RR_ENV_VARS=yes :cl_rrmethods2call[129] : Set the '*_REP_RESOURCE' variables if needed. 
:cl_rrmethods2call[131] [[ yes == yes ]] :cl_rrmethods2call[133] cllsres :cl_rrmethods2call[133] 2> /dev/null :cl_rrmethods2call[133] eval APPLICATIONS='"epprd_app"' EXPORT_FILESYSTEM='"/board_org' '/sapmnt/EPP"' FILESYSTEM='""' FORCED_VARYON='"false"' FSCHECK_TOOL='"fsck"' FS_BEFORE_IPADDR='"false"' MOUNT_FILESYSTEM='"/board;/board_org"' RECOVERY_METHOD='"sequential"' SERVICE_LABEL='"epprd"' SSA_DISK_FENCING='"false"' VG_AUTO_IMPORT='"false"' VOLUME_GROUP='"datavg"' USERDEFINED_RESOURCES='""' :cl_rrmethods2call[1] APPLICATIONS=epprd_app :cl_rrmethods2call[1] EXPORT_FILESYSTEM='/board_org /sapmnt/EPP' :cl_rrmethods2call[1] FILESYSTEM='' :cl_rrmethods2call[1] FORCED_VARYON=false :cl_rrmethods2call[1] FSCHECK_TOOL=fsck :cl_rrmethods2call[1] FS_BEFORE_IPADDR=false :cl_rrmethods2call[1] MOUNT_FILESYSTEM='/board;/board_org' :cl_rrmethods2call[1] RECOVERY_METHOD=sequential :cl_rrmethods2call[1] SERVICE_LABEL=epprd :cl_rrmethods2call[1] SSA_DISK_FENCING=false :cl_rrmethods2call[1] VG_AUTO_IMPORT=false :cl_rrmethods2call[1] VOLUME_GROUP=datavg :cl_rrmethods2call[1] USERDEFINED_RESOURCES='' :cl_rrmethods2call[137] [[ -n '' ]] :cl_rrmethods2call[142] [[ -n '' ]] :cl_rrmethods2call[147] [[ -n '' ]] :cl_rrmethods2call[152] [[ -n '' ]] :cl_rrmethods2call[157] [[ -n '' ]] :cl_rrmethods2call[162] [[ -n '' ]] :cl_rrmethods2call[167] [[ -n '' ]] :cl_rrmethods2call[172] [[ -n '' ]] :cl_rrmethods2call[182] [[ -z '' ]] :cl_rrmethods2call[184] typeset sysmgdata :cl_rrmethods2call[185] typeset reposmgdata :cl_rrmethods2call[186] [[ -x /usr/es/sbin/cluster/xd_generic/xd_cli/clxd_list_mg_smit ]] :cl_rrmethods2call[191] [[ -n '' ]] :cl_rrmethods2call[191] [[ -n '' ]] :cl_rrmethods2call[197] echo '' :cl_rrmethods2call[199] return 0 :node_up[373] METHODS='' :node_up[387] : When the local node is brought up, reset the resource locator info. :node_up[390] clchdaemons -r -d clstrmgr_scripts -t resource_locator :node_up[397] [[ '' != manual ]] :node_up[400] : attempt passive varyon for any ECM VGs in serial RGs :node_up[405] cl_pvo :cl_pvo[590] version=1.34.2.12 :cl_pvo(0.007)[592] PS4_TIMER=true :cl_pvo(0.007)[594] rc=0 :cl_pvo(0.007)[594] typeset -li rc :cl_pvo(0.007)[595] mode=0 :cl_pvo(0.008)[595] typeset -li mode :cl_pvo(0.008)[600] ENODEV=19 :cl_pvo(0.008)[600] typeset -li ENODEV :cl_pvo(0.008)[601] vg_force_on_flag='' :cl_pvo(0.008)[605] : Pick up any passed options :cl_pvo(0.008)[607] rg_list='' :cl_pvo(0.008)[607] export rg_list :cl_pvo(0.008)[608] vg_list='' :cl_pvo(0.008)[609] fs_list='' :cl_pvo(0.008)[610] all_vgs_flag='' :cl_pvo(0.008)[611] [[ -z '' ]] :cl_pvo(0.008)[613] all_vgs_flag=true :cl_pvo(0.008)[615] getopts :g:v:f: option :cl_pvo(0.008)[629] shift 0 :cl_pvo(0.008)[630] [[ -n '' ]] :cl_pvo(0.008)[645] O_flag='' :cl_pvo(0.008)[646] odmget -q 'attribute = varyon_state' PdAt :cl_pvo(0.010)[646] [[ -n $'\nPdAt:\n\tuniquetype = "logical_volume/vgsubclass/vgtype"\n\tattribute = "varyon_state"\n\tdeflt = "0"\n\tvalues = "0,1,2,3"\n\twidth = ""\n\ttype = "R"\n\tgeneric = ""\n\trep = "l"\n\tnls_index = 0' ]] :cl_pvo(0.010)[649] : LVM may record that a volume group was varied on from an earlier :cl_pvo(0.010)[650] : IPL. Rely on HA state tracking, and override the LVM check :cl_pvo(0.010)[652] O_flag=-O :cl_pvo(0.010)[655] [[ -n true ]] :cl_pvo(0.010)[657] [[ -z epprda ]] :cl_pvo(0.010)[661] [[ -z epprda ]] :cl_pvo(0.010)[672] : Since no resource names of any type were explicitly passed, go :cl_pvo(0.010)[673] : find all the resource groups this node is a member of. 
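In the cl_rrmethods2call step above, the script decides whether any replicated-resource ss_load methods apply by pulling the resource group's resource definitions into its environment: cllsres emits NAME="value" pairs, and the script evals that output so each resource type becomes a shell variable it can test. A minimal sketch of the same pattern, assuming cllsres runs in a context where a resource group is selected:

    # Import the resource definitions as shell variables, as cl_rrmethods2call does.
    eval $(cllsres 2>/dev/null)
    # Individual resources can now be tested directly:
    echo "Volume groups:  $VOLUME_GROUP"
    echo "Service label:  $SERVICE_LABEL"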
:cl_pvo(0.012)[675] clodmget -f group,nodes HACMPgroup :cl_pvo(0.015)[675] egrep '[: ]epprda( |$)' :cl_pvo(0.016)[675] cut -f1 -d: :cl_pvo(0.019)[675] rg_list=epprd_rg :cl_pvo(0.019)[676] [[ -z epprd_rg ]] :cl_pvo(0.019)[686] [[ -z '' ]] :cl_pvo(0.019)[686] [[ -n epprd_rg ]] :cl_pvo(0.019)[689] : Since no volume groups were passed, go find all the volume groups :cl_pvo(0.019)[690] : in the given/extracted list of resource groups. :cl_pvo(0.019)[695] : For each resource group that this node participates in, get the :cl_pvo(0.019)[696] : list of serial access volume groups in that resource group. :cl_pvo(0.019)[698] clodmget -q 'group = epprd_rg and name = VOLUME_GROUP' -f value -n HACMPresource :cl_pvo(0.022)[698] rg_vg_list=datavg :cl_pvo(0.022)[700] [[ -n datavg ]] :cl_pvo(0.022)[702] [[ -n true ]] :cl_pvo(0.022)[703] odmget -q $'group = epprd_rg and name like \'*REP_RESOURCE\'' HACMPresource :cl_pvo(0.024)[703] [[ -n '' ]] :cl_pvo(0.024)[739] : If there were any serial access volume groups for this node and :cl_pvo(0.024)[740] : that resource group, add them to the list. :cl_pvo(0.024)[742] vg_list=datavg :cl_pvo(0.024)[747] [[ -z '' ]] :cl_pvo(0.024)[747] [[ -n epprd_rg ]] :cl_pvo(0.024)[750] : Since no file systems were passed, go find all the file systems in :cl_pvo(0.024)[751] : the given/extracted list of resource groups. :cl_pvo(0.024)[755] : For each resource group that this node participates in, get the :cl_pvo(0.024)[756] : list of file systems in that resource group. :cl_pvo(0.024)[761] clodmget -q 'group = epprd_rg and name = FILESYSTEM' -f value -n HACMPresource :cl_pvo(0.027)[761] rg_fs_list=ALL :cl_pvo(0.027)[763] [[ -n ALL ]] :cl_pvo(0.027)[765] [[ -n true ]] :cl_pvo(0.027)[766] odmget -q $'group = epprd_rg and name like \'*REP_RESOURCE\'' HACMPresource :cl_pvo(0.029)[766] [[ -n '' ]] :cl_pvo(0.029)[780] : If there were any file systems for this node and that resource :cl_pvo(0.029)[781] : group, add them to the list :cl_pvo(0.029)[783] fs_list=ALL :cl_pvo(0.029)[790] [[ ALL == ALL ]] :cl_pvo(0.030)[792] continue :cl_pvo(0.030)[801] : Remove any duplicates from the volume group list :cl_pvo(0.031)[803] echo datavg :cl_pvo(0.033)[803] tr ' ' '\n' :cl_pvo(0.034)[803] sort -u :cl_pvo(0.038)[803] vg_list=datavg :cl_pvo(0.038)[805] [[ -z datavg ]] :cl_pvo(0.038)[814] : Find out what volume groups are currently on-line :cl_pvo(0.038)[816] lsvg -L -o :cl_pvo(0.039)[816] 2> /tmp/lsvg.err :cl_pvo(0.042)[816] print caavg_private rootvg :cl_pvo(0.042)[816] ON_LIST='caavg_private rootvg' :cl_pvo(0.042)[819] : If this node is the first node up in the cluster, :cl_pvo(0.042)[820] : we want to do a sync for each of the volume groups :cl_pvo(0.042)[821] : we bring on-line. If multiple cluster nodes are already active, the :cl_pvo(0.042)[822] : sync is unnecesary, having been done once, and possibly disruptive. :cl_pvo(0.042)[824] [[ -n '' ]] :cl_pvo(0.042)[833] : No other cluster nodes are present, default to sync just to be sure :cl_pvo(0.042)[834] : the volume group is in a good state :cl_pvo(0.042)[836] syncflag='' :cl_pvo(0.042)[840] : Now, process each volume group in the list of those this node acceses. :cl_pvo(0.042):datavg[844] PS4_LOOP=datavg :cl_pvo(0.042):datavg[844] typeset PS4_LOOP :cl_pvo(0.042):datavg[846] : Skip any concurrent GMVGs, they should never be pvo. 
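With no resources passed on its command line, cl_pvo derives everything from the ODM, as traced above: it finds the resource groups this node participates in by matching the node name in HACMPgroup, then collects each group's serial-access volume groups and file systems from HACMPresource. A sketch of those queries, using the node name from the trace:

    NODE=epprda
    # Resource groups whose node list contains this node
    rg_list=$(clodmget -f group,nodes HACMPgroup | egrep "[: ]${NODE}( |\$)" | cut -f1 -d:)
    for rg in $rg_list
    do
        # Serial-access volume groups and file systems defined for the group
        clodmget -q "group = $rg and name = VOLUME_GROUP" -f value -n HACMPresource
        clodmget -q "group = $rg and name = FILESYSTEM"   -f value -n HACMPresource
    done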
:cl_pvo(0.042):datavg[848] odmget -q name='GMVG_REP_RESOURCE AND value=datavg' HACMPresource :cl_pvo(0.044):datavg[848] [[ -n '' ]] :cl_pvo(0.044):datavg[853] : The VGID is what the LVM low level commands used below use to :cl_pvo(0.044):datavg[854] : identify the volume group. :cl_pvo(0.044):datavg[856] /usr/sbin/getlvodm -v datavg :cl_pvo(0.047):datavg[856] vgid=00c44af100004b00000001851e9dc053 :cl_pvo(0.047):datavg[860] mode=99 :cl_pvo(0.047):datavg[863] : Attempt to determine the mode of the volume group - is it an :cl_pvo(0.047):datavg[864] : enhanced concurrent mode volume group or not. :cl_pvo(0.047):datavg[868] export mode :cl_pvo(0.047):datavg[869] hdisklist='' :cl_pvo(0.048):datavg[870] /usr/sbin/getlvodm -w 00c44af100004b00000001851e9dc053 :cl_pvo(0.050):datavg[870] read pvid hdisk :cl_pvo(0.050):datavg[871] hdisklist=hdisk2 :cl_pvo(0.050):datavg[870] read pvid hdisk :cl_pvo(0.050):datavg[871] hdisklist='hdisk2 hdisk3' :cl_pvo(0.050):datavg[870] read pvid hdisk :cl_pvo(0.050):datavg[871] hdisklist='hdisk2 hdisk3 hdisk4' :cl_pvo(0.050):datavg[870] read pvid hdisk :cl_pvo(0.050):datavg[871] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5' :cl_pvo(0.050):datavg[870] read pvid hdisk :cl_pvo(0.050):datavg[871] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6' :cl_pvo(0.050):datavg[870] read pvid hdisk :cl_pvo(0.050):datavg[871] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7' :cl_pvo(0.050):datavg[870] read pvid hdisk :cl_pvo(0.051):datavg[871] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8' :cl_pvo(0.051):datavg[870] read pvid hdisk :cl_pvo(0.051):datavg[873] get_vg_mode 'hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8' 00c44af100004b00000001851e9dc053 datavg :cl_pvo(0.051):datavg[get_vg_mode:289] typeset vgid vg_name syncflag hdisklist :cl_pvo(0.051):datavg[get_vg_mode:290] typeset GROUP_NAME FORCED_VARYON :cl_pvo(0.051):datavg[get_vg_mode:291] TUR_RC=0 :cl_pvo(0.051):datavg[get_vg_mode:291] typeset -li TUR_RC :cl_pvo(0.051):datavg[get_vg_mode:292] vg_disks=0 :cl_pvo(0.051):datavg[get_vg_mode:292] typeset -li vg_disks :cl_pvo(0.051):datavg[get_vg_mode:293] max_disk_test=0 :cl_pvo(0.051):datavg[get_vg_mode:293] typeset -li max_disk_test :cl_pvo(0.051):datavg[get_vg_mode:294] disk_tested=0 :cl_pvo(0.051):datavg[get_vg_mode:294] typeset -li disk_tested :cl_pvo(0.051):datavg[get_vg_mode:296] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8' :cl_pvo(0.051):datavg[get_vg_mode:297] vgid=00c44af100004b00000001851e9dc053 :cl_pvo(0.051):datavg[get_vg_mode:298] vg_name=datavg :cl_pvo(0.051):datavg[get_vg_mode:299] syncflag='' :cl_pvo(0.051):datavg[get_vg_mode:301] odmget -q name='datavg and attribute=conc_capable and value=y' CuAt :cl_pvo(0.052):datavg[get_vg_mode:301] ODMDIR=/etc/objrepos :cl_pvo(0.053):datavg[get_vg_mode:301] [[ -n $'\nCuAt:\n\tname = "datavg"\n\tattribute = "conc_capable"\n\tvalue = "y"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "l"\n\tnls_index = 0' ]] :cl_pvo(0.053):datavg[get_vg_mode:304] : If LVM thinks that this volume group is concurrent capable, that :cl_pvo(0.053):datavg[get_vg_mode:305] : is good enough :cl_pvo(0.053):datavg[get_vg_mode:307] mode=32 :cl_pvo(0.053):datavg[get_vg_mode:308] return :cl_pvo(0.053):datavg[876] : See if the volume group is already on line. This should :cl_pvo(0.053):datavg[877] : only happen if it were manually brought on line outside of HACMP :cl_pvo(0.053):datavg[878] : control, or left on-line after a forced down. 
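get_vg_mode, traced above, classifies the volume group by asking LVM directly: if CuAt records conc_capable=y for the VG, that is taken as sufficient evidence of an enhanced concurrent mode VG (mode 32) and no disks need to be probed. The equivalent standalone check, as a sketch:

    # Is a VG enhanced concurrent capable? (mode 32 in cl_pvo's terms)
    VG=datavg
    if [[ -n $(ODMDIR=/etc/objrepos odmget -q "name = $VG and attribute = conc_capable and value = y" CuAt) ]]
    then
        echo "$VG is enhanced concurrent capable"
    fi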
:cl_pvo(0.053):datavg[880] vg_on_mode='' :cl_pvo(0.053):datavg[880] typeset vg_on_mode :cl_pvo(0.053):datavg[881] [[ 'caavg_private rootvg' == ?(*\ )datavg?(\ *) ]] :cl_pvo(0.054):datavg[891] lsvg -L datavg :cl_pvo(0.054):datavg[891] 2> /dev/null :cl_pvo(0.057):datavg[891] grep -q -i -w passive-only :cl_pvo(0.059):datavg[896] [[ -n '' ]] :cl_pvo(0.059):datavg[976] : Volume group is currently not on line in any mode :cl_pvo(0.059):datavg[978] (( 99 == 32 )) :cl_pvo(0.059):datavg[1041] (( 32 != 32 && 99 != 32 )) :cl_pvo(0.059):datavg[1060] (( 32 == 32 )) :cl_pvo(0.059):datavg[1063] : If this is actually an enhanced concurrent mode volume group, :cl_pvo(0.059):datavg[1064] : bring it on line in passive mode. Other kinds are just skipped. :cl_pvo(0.059):datavg[1066] varyonp datavg 'hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8' :cl_pvo(0.059):datavg[varyonp:417] NOQUORUM=20 :cl_pvo(0.059):datavg[varyonp:417] typeset -li NOQUORUM :cl_pvo(0.060):datavg[varyonp:418] rc=0 :cl_pvo(0.060):datavg[varyonp:418] typeset -li rc :cl_pvo(0.060):datavg[varyonp:421] : Pick up passed parameters: volume group and sync flag :cl_pvo(0.060):datavg[varyonp:423] typeset syncflag hdisklist vg :cl_pvo(0.060):datavg[varyonp:424] vg=datavg :cl_pvo(0.060):datavg[varyonp:425] hdisklist='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8' :cl_pvo(0.060):datavg[varyonp:426] syncflag='' :cl_pvo(0.060):datavg[varyonp:429] : Make sure the volume group is not fenced. Varyon requires read write :cl_pvo(0.060):datavg[varyonp:430] : access. :cl_pvo(0.060):datavg[varyonp:432] cl_set_vg_fence_height -c datavg rw cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0)) :cl_pvo(0.063):datavg[varyonp:433] RC=0 :cl_pvo(0.063):datavg[varyonp:434] (( 19 == 0 )) :cl_pvo(0.063):datavg[varyonp:442] : Return code from volume group fencing for datavg is 0 :cl_pvo(0.063):datavg[varyonp:443] (( 0 != 0 )) :cl_pvo(0.063):datavg[varyonp:455] : Try to vary on the volume group in passive concurrent mode :cl_pvo(0.063):datavg[varyonp:457] varyonvg -c -P -O datavg :cl_pvo(0.541):datavg[varyonp:458] rc=0 :cl_pvo(0.541):datavg[varyonp:460] (( 0 != 0 )) :cl_pvo(0.541):datavg[varyonp:483] : exit status of varyonvg -c -P -O datavg is: 0 :cl_pvo(0.541):datavg[varyonp:485] (( 0 == 20 )) :cl_pvo(0.541):datavg[varyonp:505] : If varyon was ultimately unsuccessful, note the error :cl_pvo(0.541):datavg[varyonp:507] (( 0 != 0 )) :cl_pvo(0.541):datavg[varyonp:511] : If varyonvg was successful, try to recover :cl_pvo(0.541):datavg[varyonp:512] : any missing or removed disks :cl_pvo(0.541):datavg[varyonp:514] mr_recovery datavg :cl_pvo(0.541):datavg[mr_recovery:59] vg=datavg :cl_pvo(0.541):datavg[mr_recovery:59] typeset vg :cl_pvo(0.541):datavg[mr_recovery:60] typeset mr_disks :cl_pvo(0.541):datavg[mr_recovery:61] typeset disk_list :cl_pvo(0.541):datavg[mr_recovery:62] typeset hdisk :cl_pvo(0.543):datavg[mr_recovery:64] lsvg -p datavg :cl_pvo(0.543):datavg[mr_recovery:64] 2> /dev/null :cl_pvo(0.546):datavg[mr_recovery:64] grep -iw missing :cl_pvo(0.564):datavg[mr_recovery:64] missing_disks='' :cl_pvo(0.564):datavg[mr_recovery:66] [[ -n '' ]] 
:cl_pvo(0.566):datavg[mr_recovery:89] lsvg -p datavg :cl_pvo(0.566):datavg[mr_recovery:89] 2> /dev/null :cl_pvo(0.568):datavg[mr_recovery:89] grep -iw removed :cl_pvo(0.587):datavg[mr_recovery:89] removed_disks='' :cl_pvo(0.587):datavg[mr_recovery:91] [[ -n '' ]] :cl_pvo(0.587):datavg[varyonp:518] : Restore the fence height to read only, for passive varyon :cl_pvo(0.587):datavg[varyonp:520] cl_set_vg_fence_height -c datavg ro cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=ro(2)) :cl_pvo(0.590):datavg[varyonp:521] RC=0 :cl_pvo(0.590):datavg[varyonp:522] : Return code from volume group fencing for datavg is 0 :cl_pvo(0.590):datavg[varyonp:523] (( 0 != 0 )) :cl_pvo(0.590):datavg[varyonp:533] return 0 :cl_pvo(0.590):datavg[1073] return 0 :node_up[406] : exit status of cl_pvo is: 0 :node_up[422] ls '/dev/vpath*' :node_up[422] 1> /dev/null 2>& 1 :node_up[432] : Configure any split and merge policies. :node_up[434] rm -f /usr/es/sbin/cluster/etc/smm_oflag :node_up[435] [[ -z '' ]] :node_up[438] : If this is the first node up, configure split merge handling. :node_up[440] cl_cfg_sm_rt :cl_cfg_sm_rt[738] version=1.34 :cl_cfg_sm_rt[741] clctrl_rc=0 :cl_cfg_sm_rt[741] typeset -li clctrl_rc :cl_cfg_sm_rt[742] src_rc=0 :cl_cfg_sm_rt[742] typeset -li src_rc :cl_cfg_sm_rt[743] cl_migcheck_rc=0 :cl_cfg_sm_rt[743] typeset -li cl_migcheck_rc :cl_cfg_sm_rt[744] bad_policy='' :cl_cfg_sm_rt[745] SMP='' :cl_cfg_sm_rt[748] : If we are in migration - if all nodes are not up to this level - do not :cl_cfg_sm_rt[749] : attempt any configuration. 
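The varyonp function traced above brings an enhanced concurrent VG online in passive mode with a fence-height dance around the varyon: fencing is dropped to read-write (varyon needs write access to the disks), the VG is varied on in passive concurrent mode, missing or removed disks are checked by mr_recovery, and the fence is restored to read-only. A sketch of the sequence, using the flags shown in the trace:

    VG=datavg
    cl_set_vg_fence_height -c $VG rw    # varyon requires read-write access
    varyonvg -c -P -O $VG               # -c concurrent, -P passive, -O override LVM's recorded varyon state
    rc=$?
    # (mr_recovery would re-activate any missing or removed disks here)
    cl_set_vg_fence_height -c $VG ro    # passive varyon only needs read-only
    echo "varyonvg exit status: $rc"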
:cl_cfg_sm_rt[751] clmixver :cl_cfg_sm_rt[751] version=22 :cl_cfg_sm_rt[752] (( 22 < 14 )) :cl_cfg_sm_rt[761] : Retrieve configured policies :cl_cfg_sm_rt[763] clodmget -q 'policy = action' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[763] Action=Reboot :cl_cfg_sm_rt[764] clodmget -q 'policy = split' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[764] Split=None :cl_cfg_sm_rt[765] clodmget -q 'policy = merge' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[765] Merge=Majority :cl_cfg_sm_rt[766] clodmget -q 'policy = tiebreaker' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[766] TieBreaker='' :cl_cfg_sm_rt[767] clodmget -q 'policy = nfs_quorumserver' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[767] nfs_quorumserver='' :cl_cfg_sm_rt[768] clodmget -q 'policy = local_quorumdirectory' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[768] local_quorumdirectory='' :cl_cfg_sm_rt[769] clodmget -q 'policy = remote_quorumdirectory' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[769] remote_quorumdirectory='' :cl_cfg_sm_rt[770] clodmget -q 'policy = anhp' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[770] is_anhp='' :cl_cfg_sm_rt[771] clodmget -q 'policy = scsi' -f value -n HACMPsplitmerge :cl_cfg_sm_rt[771] is_scsi='' :cl_cfg_sm_rt[772] clodmget -q name=clutils.log -f value -n HACMPlogs :cl_cfg_sm_rt[772] CLUTILS_LOG=/var/hacmp/log/clutils.log :cl_cfg_sm_rt[775] : If policies are unset, apply the default policies :cl_cfg_sm_rt[777] Split=None :cl_cfg_sm_rt[778] Merge=Majority :cl_cfg_sm_rt[779] Action=Reboot :cl_cfg_sm_rt[782] : If tiebreaker was a configured policy, be sure that one was defined :cl_cfg_sm_rt[784] [[ -z '' ]] :cl_cfg_sm_rt[786] [[ None == TieBreaker ]] :cl_cfg_sm_rt[790] [[ Majority == TieBreaker ]] :cl_cfg_sm_rt[795] [[ -n '' ]] :cl_cfg_sm_rt[807] : Set up the interlock file for use by smcaactrl. This tells :cl_cfg_sm_rt[808] : smcaactrl to allow the following CAA operations. :cl_cfg_sm_rt[810] date :cl_cfg_sm_rt[810] 1> /usr/es/sbin/cluster/etc/cl_cfg_sm_rt.15991052 :cl_cfg_sm_rt[811] trap 'on_exit $?' EXIT :cl_cfg_sm_rt[814] : Setting up CAA tunable local_merge_policy :cl_cfg_sm_rt[816] typeset -i caa_level :cl_cfg_sm_rt[817] lslpp -l bos.cluster.rte :cl_cfg_sm_rt[817] grep bos.cluster.rte :cl_cfg_sm_rt[817] uniq :cl_cfg_sm_rt[817] awk -F ' ' '{print $2}' :cl_cfg_sm_rt[817] tr -d . 
:cl_cfg_sm_rt[817] caa_level=725102 :cl_cfg_sm_rt[818] (( 725102 >=7140 )) :cl_cfg_sm_rt[819] configure_local_merge_policy :cl_cfg_sm_rt[configure_local_merge_policy:665] typeset -i clctrl_rc :cl_cfg_sm_rt[configure_local_merge_policy:666] [[ -z '' ]] :cl_cfg_sm_rt[configure_local_merge_policy:666] [[ -z '' ]] :cl_cfg_sm_rt[configure_local_merge_policy:667] capability=0 :cl_cfg_sm_rt[configure_local_merge_policy:667] typeset -i capability :cl_cfg_sm_rt[configure_local_merge_policy:669] cl_get_capabilities -i 6 :cl_cfg_sm_rt[configure_local_merge_policy:669] 2>& 1 :cl_cfg_sm_rt[configure_local_merge_policy:669] caa_sm_capability=$':cl_cfg_sm_rt[configure_local_merge_policy:669] LC_ALL=C\ncl_get_capabilities[178]: version 1.9\ncapability is 6\n\tid: 6 version: 1 flag: 1 ' :cl_cfg_sm_rt[configure_local_merge_policy:670] [[ -n $':cl_cfg_sm_rt[configure_local_merge_policy:669] LC_ALL=C\ncl_get_capabilities[178]: version 1.9\ncapability is 6\n\tid: 6 version: 1 flag: 1 ' ]] :cl_cfg_sm_rt[configure_local_merge_policy:674] : If Sub Cluster Split Merge capability is defined :cl_cfg_sm_rt[configure_local_merge_policy:675] : and globally available, then capability is set to 1 :cl_cfg_sm_rt[configure_local_merge_policy:677] capability='1 ' :cl_cfg_sm_rt[configure_local_merge_policy:680] (( 1 == 1 )) :cl_cfg_sm_rt[configure_local_merge_policy:682] : Sub Cluster Split-Merge capability is available cluster wide :cl_cfg_sm_rt[configure_local_merge_policy:684] [[ Majority != None ]] :cl_cfg_sm_rt[configure_local_merge_policy:686] clctrl -tune -o local_merge_policy=h 1 tunable updated on cluster epprda_cluster. :cl_cfg_sm_rt[configure_local_merge_policy:687] clctrl_rc=0 :cl_cfg_sm_rt[configure_local_merge_policy:688] (( 0 != 0 )) :cl_cfg_sm_rt[configure_local_merge_policy:725] return 0 :cl_cfg_sm_rt[820] rc=0 :cl_cfg_sm_rt[820] typeset -i rc :cl_cfg_sm_rt[821] (( 0 < 0 )) :cl_cfg_sm_rt[827] : Configure CAA in accordance with the specified or defaulted policies :cl_cfg_sm_rt[828] : for Merge :cl_cfg_sm_rt[830] clctrl -tune -a epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).communication_mode = u epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).config_timeout = 240 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).deadman_mode = a epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).dr_enabled = 1 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).link_timeout = 30000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).local_merge_policy = h epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).network_fdt = 20000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).no_if_traffic_monitor = 0 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).node_down_delay = 10000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).node_timeout = 30000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).packet_ttl = 32 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).remote_hb_factor = 1 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).repos_mode = e epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).site_merge_policy = h :cl_cfg_sm_rt[831] clctrl_rc=0 :cl_cfg_sm_rt[832] : Return code from 'clctrl -tune -a' is 0 :cl_cfg_sm_rt[835] : If the current deadman mode is not set to ASSERT, :cl_cfg_sm_rt[836] : change it to that :cl_cfg_sm_rt[842] clctrl -tune -x deadman_mode :cl_cfg_sm_rt[842] cut -f2 -d: :cl_cfg_sm_rt[842] current_deadman_mode=a :cl_cfg_sm_rt[843] [[ a != a ]] :cl_cfg_sm_rt[849] : Determine the current site merge policy, to see if it needs :cl_cfg_sm_rt[850] : to be changed 
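cl_cfg_sm_rt maps the PowerHA split/merge policies onto CAA tunables through clctrl, as traced above: local_merge_policy is set to 'h' (heuristic) because the merge policy is Majority rather than None, and individual tunables can be read back with -x before deciding whether a change is needed. A sketch of those calls:

    # Set, read back, and dump CAA cluster tunables, as cl_cfg_sm_rt does.
    clctrl -tune -o local_merge_policy=h                        # 'h' = heuristic merge handling
    current=$(clctrl -tune -x site_merge_policy | cut -f2 -d:)  # read one tunable
    echo "site_merge_policy=$current"
    clctrl -tune -a                                             # list all tunables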
:cl_cfg_sm_rt[852] clctrl -tune -x site_merge_policy :cl_cfg_sm_rt[852] cut -f2 -d: :cl_cfg_sm_rt[852] current_merge_policy=h :cl_cfg_sm_rt[854] [[ Majority == Manual ]] :cl_cfg_sm_rt[865] [[ Majority == None ]] :cl_cfg_sm_rt[878] : Everything else - tie breaker, majority, nfs - is heuristic merge policy :cl_cfg_sm_rt[880] [[ h != h ]] :cl_cfg_sm_rt[886] clctrl_rc=0 :cl_cfg_sm_rt[887] (( 0 != 0 )) :cl_cfg_sm_rt[901] [[ -n '' ]] :cl_cfg_sm_rt[919] RSCT_START_RETRIES=0 :cl_cfg_sm_rt[919] typeset -li RSCT_START_RETRIES :cl_cfg_sm_rt[920] MIN_RSCT_RETRIES=1 :cl_cfg_sm_rt[920] typeset -li MIN_RSCT_RETRIES :cl_cfg_sm_rt[921] MAX_RSCT_RETRIES=15 :cl_cfg_sm_rt[921] typeset -li MAX_RSCT_RETRIES :cl_cfg_sm_rt[922] grep ^RSCT_START_RETRIES /etc/environment :cl_cfg_sm_rt[922] eval :cl_cfg_sm_rt[923] (( 0 < 1 )) :cl_cfg_sm_rt[923] RSCT_START_RETRIES=1 :cl_cfg_sm_rt[924] (( 1 > 15 )) :cl_cfg_sm_rt[926] RSCT_TB_WAITTIME=0 :cl_cfg_sm_rt[926] typeset -li RSCT_TB_WAITTIME :cl_cfg_sm_rt[927] grep ^RSCT_TB_WAITTIME /etc/environment :cl_cfg_sm_rt[927] eval :cl_cfg_sm_rt[928] (( 0 <= 0 )) :cl_cfg_sm_rt[928] RSCT_TB_WAITTIME=30 :cl_cfg_sm_rt[930] RSCT_START_WAIT=0 :cl_cfg_sm_rt[930] typeset -li RSCT_START_WAIT :cl_cfg_sm_rt[931] MIN_RSCT_WAIT=10 :cl_cfg_sm_rt[931] typeset -li MIN_RSCT_WAIT :cl_cfg_sm_rt[932] MAX_RSCT_WAIT=60 :cl_cfg_sm_rt[932] typeset -li MAX_RSCT_WAIT :cl_cfg_sm_rt[933] grep ^RSCT_START_WAIT /etc/environment :cl_cfg_sm_rt[933] eval :cl_cfg_sm_rt[934] (( 0 < 10 )) :cl_cfg_sm_rt[934] RSCT_START_WAIT=10 :cl_cfg_sm_rt[935] (( 10 > 60 )) :cl_cfg_sm_rt[937] (( retries=0)) :cl_cfg_sm_rt[937] (( 0 < 1)) :cl_cfg_sm_rt[939] lsrsrc IBM.PeerNode :cl_cfg_sm_rt[939] 1>> /var/hacmp/log/clutils.log 2>& 1 :cl_cfg_sm_rt[941] break :cl_cfg_sm_rt[947] (( 0 >= 1 )) :cl_cfg_sm_rt[954] : Configure RSCT in accordance with the specified or defaulted policies :cl_cfg_sm_rt[955] : for Split :cl_cfg_sm_rt[965] CT_MANAGEMENT_SCOPE=2 :cl_cfg_sm_rt[965] export CT_MANAGEMENT_SCOPE :cl_cfg_sm_rt[966] lsrsrc -t -c -x IBM.PeerNode OpQuorumTieBreaker :cl_cfg_sm_rt[966] Current_TB='"Success" ' :cl_cfg_sm_rt[967] Current_TB='"Success' :cl_cfg_sm_rt[968] Current_TB=Success :cl_cfg_sm_rt[969] [[ None == None ]] :cl_cfg_sm_rt[971] [[ Success == Success ]] :cl_cfg_sm_rt[973] chrsrc -c IBM.PeerNode OpQuorumTieBreaker=Operator :cl_cfg_sm_rt[974] src_rc=0 :cl_cfg_sm_rt[975] (( 0 != 0 )) :cl_cfg_sm_rt[981] (( 0 == 0 )) :cl_cfg_sm_rt[983] chrsrc -s Name='="Success"' IBM.TieBreaker PostReserveWaitTime=30 :cl_cfg_sm_rt[984] src_rc=0 :cl_cfg_sm_rt[985] (( 0 != 0 )) :cl_cfg_sm_rt[990] chrsrc -c IBM.PeerNode OpQuorumTieBreaker=Success :cl_cfg_sm_rt[991] src_rc=0 :cl_cfg_sm_rt[992] (( 0 != 0 )) :cl_cfg_sm_rt[1044] src_rc=0 :cl_cfg_sm_rt[1045] (( 0 != 0 )) :cl_cfg_sm_rt[1053] : Configure RSCT Action :cl_cfg_sm_rt[1055] chrsrc -c IBM.PeerNode QuorumType=4 :cl_cfg_sm_rt[1056] src_rc=0 :cl_cfg_sm_rt[1057] (( 0 != 0 )) :cl_cfg_sm_rt[1064] chrsrc -c IBM.PeerNode CriticalMode=2 :cl_cfg_sm_rt[1065] src_rc=0 :cl_cfg_sm_rt[1066] (( 0 != 0 )) :cl_cfg_sm_rt[1073] [[ Reboot == Reboot ]] :cl_cfg_sm_rt[1075] chrsrc -c IBM.PeerNode CritRsrcProtMethod=1 :cl_cfg_sm_rt[1077] src_rc=0 :cl_cfg_sm_rt[1078] (( 0 != 0 )) :cl_cfg_sm_rt[1086] : Configure RSCT Critical Resource Daemon Grace Period for cluster level. 
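With the CAA side done, the script configures RSCT for the Split=None, Merge=Majority, Action=Reboot policies seen above: CT_MANAGEMENT_SCOPE=2 selects peer-domain scope, the operational quorum tiebreaker is temporarily switched to Operator so the Success tiebreaker's PostReserveWaitTime can be changed, and QuorumType, CriticalMode, and CritRsrcProtMethod are then set to enforce the Reboot action. A condensed sketch of the same sequence (the -s selection string is shown in its plain RSCT form rather than the ksh-quoted form in the trace):

    export CT_MANAGEMENT_SCOPE=2                             # peer-domain scope
    lsrsrc -t -c -x IBM.PeerNode OpQuorumTieBreaker          # current tiebreaker
    chrsrc -c IBM.PeerNode OpQuorumTieBreaker=Operator       # detach before modifying
    chrsrc -s 'Name=="Success"' IBM.TieBreaker PostReserveWaitTime=30
    chrsrc -c IBM.PeerNode OpQuorumTieBreaker=Success        # restore tiebreaker
    chrsrc -c IBM.PeerNode QuorumType=4
    chrsrc -c IBM.PeerNode CriticalMode=2
    chrsrc -c IBM.PeerNode CritRsrcProtMethod=1              # 1 = reboot on loss of critical resource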
:cl_cfg_sm_rt[1088] typeset grace_period :cl_cfg_sm_rt[1089] clodmget -f crit_daemon_restart_grace_period HACMPcluster :cl_cfg_sm_rt[1089] grace_period=60 :cl_cfg_sm_rt[1090] lsrsrc -c IBM.PeerNode :cl_cfg_sm_rt[1090] LC_ALL=C :cl_cfg_sm_rt[1090] awk -F= '{print $2}' :cl_cfg_sm_rt[1090] grep CritDaemonRestartGracePeriod :cl_cfg_sm_rt[1090] rsct_grace_period=' 60' :cl_cfg_sm_rt[1091] [[ -n ' 60' ]] :cl_cfg_sm_rt[1092] (( 60 != 60 )) :cl_cfg_sm_rt[1104] : Configure RSCT Critical Resource Daemon Grace Period for node level. :cl_cfg_sm_rt[1106] typeset node_grace_period :cl_cfg_sm_rt[1107] typeset node_list :cl_cfg_sm_rt[1108] typeset rsct_node_grace_period :cl_cfg_sm_rt[1110] : Get the CAA active nodes list :cl_cfg_sm_rt[1112] lscluster -m :cl_cfg_sm_rt[1112] grep -p 'State of node: UP' :cl_cfg_sm_rt[1112] grep -w 'Node name:' :cl_cfg_sm_rt[1112] cut -f2 -d: :cl_cfg_sm_rt[1112] node_list=$' epprda\n epprds' :cl_cfg_sm_rt[1115] clodmget -n -q object='COMMUNICATION_PATH and value=epprda' -f name HACMPnode :cl_cfg_sm_rt[1115] host_name=epprda :cl_cfg_sm_rt[1116] clodmget -n -q object='CRIT_DAEMON_RESTART_GRACE_PERIOD and name=epprda' -f value HACMPnode :cl_cfg_sm_rt[1116] node_grace_period='' :cl_cfg_sm_rt[1117] [[ -n '' ]] :cl_cfg_sm_rt[1115] clodmget -n -q object='COMMUNICATION_PATH and value=epprds' -f name HACMPnode :cl_cfg_sm_rt[1115] host_name=epprds :cl_cfg_sm_rt[1116] clodmget -n -q object='CRIT_DAEMON_RESTART_GRACE_PERIOD and name=epprds' -f value HACMPnode :cl_cfg_sm_rt[1116] node_grace_period='' :cl_cfg_sm_rt[1117] [[ -n '' ]] :cl_cfg_sm_rt[1134] : Success exit. Display the CAA and RSCT configuration :cl_cfg_sm_rt[1136] clctrl -tune -a epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).communication_mode = u epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).config_timeout = 240 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).deadman_mode = a epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).dr_enabled = 1 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).link_timeout = 30000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).local_merge_policy = h epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).network_fdt = 20000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).no_if_traffic_monitor = 0 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).node_down_delay = 10000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).node_timeout = 30000 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).packet_ttl = 32 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).remote_hb_factor = 1 epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).repos_mode = e epprda_cluster(f43c91c2-9ee2-11ed-8018-fae6134ea920).site_merge_policy = h :cl_cfg_sm_rt[1137] lscluster -m Calling node query for all nodes... 
Node query number of nodes examined: 2 Node name: epprda Cluster shorthand id for node: 1 UUID for node: f42873b8-9ee2-11ed-8018-fae6134ea920 State of node: UP NODE_LOCAL Reason: NONE Smoothed rtt to node: 0 Mean Deviation in network rtt to node: 0 Number of clusters node is a member in: 1 CLUSTER NAME SHID UUID epprda_cluster 0 f43c91c2-9ee2-11ed-8018-fae6134ea920 SITE NAME SHID UUID LOCAL 1 51735173-5173-5173-5173-517351735173 Points of contact for node: 0 ---------------------------------------------------------------------------- Node name: epprds Cluster shorthand id for node: 2 UUID for node: f42873fe-9ee2-11ed-8018-fae6134ea920 State of node: UP Reason: NONE Smoothed rtt to node: 8 Mean Deviation in network rtt to node: 4 Number of clusters node is a member in: 1 CLUSTER NAME SHID UUID epprda_cluster 0 f43c91c2-9ee2-11ed-8018-fae6134ea920 SITE NAME SHID UUID LOCAL 1 51735173-5173-5173-5173-517351735173 Points of contact for node: 1 ----------------------------------------------------------------------- Interface State Protocol Status SRC_IP->DST_IP ----------------------------------------------------------------------- tcpsock->02 UP IPv4 none 61.81.244.134->61.81.244.123 :cl_cfg_sm_rt[1138] lsrsrc -x -A b IBM.PeerNode resource 1: Name = "epprds" NodeList = {2} RSCTVersion = "3.2.6.4" ClassVersions = {} CritRsrcProtMethod = 0 IsQuorumNode = 1 IsPreferredGSGL = 1 NodeUUID = "f42873fe-9ee2-11ed-8018-fae6134ea920" HostName = "epprds" TBPriority = 0 CritDaemonRestartGracePeriod = -1 ActivePeerDomain = "epprda_cluster" NodeNameList = {"epprds"} OpState = 1 ConfigChanged = 0 CritRsrcActive = 0 OpUsabilityState = 1 MaintenanceState = 0 resource 2: Name = "epprda" NodeList = {1} RSCTVersion = "3.2.6.4" ClassVersions = {} CritRsrcProtMethod = 0 IsQuorumNode = 1 IsPreferredGSGL = 1 NodeUUID = "f42873b8-9ee2-11ed-8018-fae6134ea920" HostName = "epprda" TBPriority = 0 CritDaemonRestartGracePeriod = -1 ActivePeerDomain = "epprda_cluster" NodeNameList = {"epprda"} OpState = 1 ConfigChanged = 0 CritRsrcActive = 0 OpUsabilityState = 1 MaintenanceState = 0 :cl_cfg_sm_rt[1139] lsrsrc -x -c -A b IBM.PeerNode resource 1: CommittedRSCTVersion = "3.2.2.0" ActiveVersionChanging = 0 OpQuorumOverride = 0 CritRsrcProtMethod = 1 OpQuorumTieBreaker = "Success" QuorumType = 4 QuorumGroupName = "" Fanout = 32 OpFenceGroup = "" NodeCleanupCommand = "" NodeCleanupCriteria = "" QuorumLessStartupTimeout = 120 CriticalMode = 2 NotifyQuorumChangedCommand = "" NamePolicy = 1 LiveUpdateOptions = "" QuorumNotificationRespWaitTime = 0 MaintenanceModeConfig = "" CritDaemonRestartGracePeriod = 60 :cl_cfg_sm_rt[1141] return 0 :cl_cfg_sm_rt[1] on_exit 0 :node_up[441] : exit status of cl_cfg_sm_rt is 0 :node_up[498] : Enable NFS crossmounts during manual start :node_up[500] [[ -n false ]] :node_up[500] [[ false == true ]] :node_up[607] : When RG dependencies are not configured we call node_up_local/remote, :node_up[608] : followed by process_resources to process any remaining groups :node_up[610] [[ TRUE == FALSE ]] :node_up[657] [[ epprda == epprda ]] :node_up[660] : Perform any deferred TCP daemon startup, if necessary, :node_up[661] : along with any necessary start up of iSCSI devices. 
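Node-level grace periods are applied per CAA-active node, as traced above: lscluster -m is parsed for nodes whose state is UP, each name is mapped back to an HACMP node through its COMMUNICATION_PATH, and any per-node CRIT_DAEMON_RESTART_GRACE_PERIOD would then be pushed to RSCT (neither node defines one here). A sketch of the node-list extraction, relying on AIX's paragraph grep as the trace does:

    # List CAA nodes currently UP (grep -p selects whole paragraphs on AIX)
    node_list=$(lscluster -m | grep -p 'State of node: UP' | grep -w 'Node name:' | cut -f2 -d:)
    for n in $node_list
    do
        # Map the CAA node name back to the HACMP node definition
        clodmget -n -q "object=COMMUNICATION_PATH and value=$n" -f name HACMPnode
    done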
:node_up[663] cl_telinit :cl_telinit[178] version=%I% :cl_telinit[182] TELINIT_FILE=/usr/es/sbin/cluster/.telinit :cl_telinit[183] USE_TELINIT_FILE=/usr/es/sbin/cluster/.use_telinit :cl_telinit[185] [[ -f /usr/es/sbin/cluster/.use_telinit ]] :cl_telinit[189] USE_TELINIT=0 :cl_telinit[198] [[ '' == -boot ]] :cl_telinit[236] cl_lsitab clinit :cl_telinit[236] 1> /dev/null 2>& 1 :cl_telinit[239] : telinit a disabled :cl_telinit[241] return 0 :node_up[664] : exit status of cl_telinit is: 0 :node_up[667] return 0 Sep 30 2023 03:25:11 EVENT COMPLETED: node_up epprda 0 |2023-09-30T03:25:11|18769|EVENT COMPLETED: node_up epprda 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-30T03:25:11.996846 + echo '|2023-09-30T03:25:11.996846|INFO: node_up|epprda|0' + 1>> /var/hacmp/availability/clavailability.log Sep 30 2023 03:25:14 EVENT START: rg_move_fence epprda 1 |2023-09-30T03:25:14|18770|EVENT START: rg_move_fence epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-30T03:25:14.210192 + echo '|2023-09-30T03:25:14.210192|INFO: rg_move_fence|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_fence[62] [[ high == high ]] :rg_move_fence[62] version=1.11 :rg_move_fence[63] NODENAME=epprda :rg_move_fence[63] export NODENAME :rg_move_fence[65] set -u :rg_move_fence[67] [ 2 != 2 ] :rg_move_fence[73] set +u :rg_move_fence[75] [[ -z TRUE ]] :rg_move_fence[80] [[ TRUE == TRUE ]] :rg_move_fence[82] LOCAL_NODENAME=epprda :rg_move_fence[83] odmget -qid=1 HACMPgroup :rg_move_fence[83] egrep 'group =' :rg_move_fence[83] awk '{print $3}' :rg_move_fence[83] eval RGNAME='"epprd_rg"' :rg_move_fence[1] RGNAME=epprd_rg +epprd_rg:rg_move_fence[84] GROUPNAME=epprd_rg +epprd_rg:rg_move_fence[85] group_state='$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[86] set +u +epprd_rg:rg_move_fence[87] eval print '$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[1] print +epprd_rg:rg_move_fence[87] RG_MOVE_ONLINE='' +epprd_rg:rg_move_fence[87] export RG_MOVE_ONLINE +epprd_rg:rg_move_fence[88] set -u +epprd_rg:rg_move_fence[89] RG_MOVE_ONLINE=TMP_ERROR +epprd_rg:rg_move_fence[91] set -a +epprd_rg:rg_move_fence[92] clsetenvgrp epprda rg_move epprd_rg '' :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_fence[92] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[93] RC=0 +epprd_rg:rg_move_fence[94] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_fence[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_fence[3] HOMELESS_GROUPS='' 
+epprd_rg:rg_move_fence[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_fence[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_fence[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_fence[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_fence[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_fence[8] SIBLING_GROUPS='' +epprd_rg:rg_move_fence[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_fence[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_fence[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[95] set +a +epprd_rg:rg_move_fence[96] [ 0 -ne 0 ] +epprd_rg:rg_move_fence[103] process_resources FENCE :rg_move_fence[3318] version=1.169 :rg_move_fence[3321] STATUS=0 :rg_move_fence[3322] sddsrv_off=FALSE :rg_move_fence[3324] true :rg_move_fence[3326] : call rgpa, and it will tell us what to do next :rg_move_fence[3328] set -a :rg_move_fence[3329] clRGPA FENCE :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa FENCE 2023-09-30T03:25:14.324237 clrgpa :clRGPA[+55] exit 0 :rg_move_fence[3329] eval JOB_TYPE=NONE :rg_move_fence[1] JOB_TYPE=NONE :rg_move_fence[3330] RC=0 :rg_move_fence[3331] set +a :rg_move_fence[3333] (( 0 != 0 )) :rg_move_fence[3342] RESOURCE_GROUPS='' :rg_move_fence[3343] GROUPNAME='' :rg_move_fence[3343] export GROUPNAME :rg_move_fence[3353] IS_SERVICE_START=1 :rg_move_fence[3354] IS_SERVICE_STOP=1 :rg_move_fence[3360] [[ NONE == RELEASE ]] :rg_move_fence[3360] [[ NONE == ONLINE ]] :rg_move_fence[3729] break :rg_move_fence[3740] : If sddsrv was turned off above, turn it back on again :rg_move_fence[3742] [[ FALSE == TRUE ]] :rg_move_fence[3747] exit 0 +epprd_rg:rg_move_fence[104] : exit status of process_resources FENCE is: 0 +epprd_rg:rg_move_fence[107] [[ TRUE == TRUE ]] +epprd_rg:rg_move_fence[109] export EVENT_TYPE +epprd_rg:rg_move_fence[110] echo ACQUIRE_PRIMARY ACQUIRE_PRIMARY +epprd_rg:rg_move_fence[111] [[ -n '' ]] +epprd_rg:rg_move_fence[141] exit 0 Sep 30 2023 03:25:14 EVENT COMPLETED: rg_move_fence epprda 1 0 |2023-09-30T03:25:14|18770|EVENT COMPLETED: rg_move_fence epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-30T03:25:14.420428 + echo '|2023-09-30T03:25:14.420428|INFO: rg_move_fence|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log Sep 30 2023 03:25:14 EVENT START: rg_move_acquire epprda 1 |2023-09-30T03:25:14|18770|EVENT START: rg_move_acquire epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-30T03:25:14.614069 + echo '|2023-09-30T03:25:14.614069|INFO: rg_move_acquire|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_acquire[+54] [[ high == high ]] :rg_move_acquire[+54] version=1.9.1.7 :rg_move_acquire[+57] set -u :rg_move_acquire[+59] [ 2 != 2 ] :rg_move_acquire[+65] set +u :rg_move_acquire[+67] :rg_move_acquire[+67] clodmget -n -q id=1 -f group HACMPgroup RG=epprd_rg :rg_move_acquire[+68] export RG :rg_move_acquire[+70] [[ ACQUIRE_PRIMARY == ACQUIRE_PRIMARY ]] :rg_move_acquire[+75] typeset -i anhp_ret=0 :rg_move_acquire[+76] typeset -i scsi_ret=0 :rg_move_acquire[+78] clodmget -n -q policy = anhp -f value HACMPsplitmerge :rg_move_acquire[+78] typeset ANHP_ENABLED= :rg_move_acquire[+78] [[ == Yes ]] :rg_move_acquire[+87] clodmget -n -q policy = scsi -f value HACMPsplitmerge :rg_move_acquire[+87] typeset 
SCSIPR_ENABLED= :rg_move_acquire[+87] [[ == Yes ]] :rg_move_acquire[+106] (( 0 == 1 && 0 == 1 )) :rg_move_acquire[+109] (( 0 == 1 && 0 == 0 )) :rg_move_acquire[+112] (( 0 == 1 && 0 == 0 )) :rg_move_acquire[+118] clcallev rg_move epprda 1 ACQUIRE Sep 30 2023 03:25:14 EVENT START: rg_move epprda 1 ACQUIRE |2023-09-30T03:25:14|18770|EVENT START: rg_move epprda 1 ACQUIRE| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-09-30T03:25:14.742160 :clevlog[amlog_trace:320] echo '|2023-09-30T03:25:14.742160|INFO: rg_move|epprd_rg|epprda|1|ACQUIRE' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. :get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move[76] version=%I% :rg_move[86] STATUS=0 :rg_move[88] [[ ! -n '' ]] :rg_move[90] EMULATE=REAL :rg_move[96] set -u :rg_move[98] NODENAME=epprda :rg_move[98] export NODENAME :rg_move[99] RGID=1 :rg_move[100] (( 3 == 3 )) :rg_move[102] ACTION=ACQUIRE :rg_move[108] : serial number for this event is 18770 :rg_move[112] RG_UP_POSTEVENT_ON_NODE=epprda :rg_move[112] export RG_UP_POSTEVENT_ON_NODE :rg_move[116] clodmget -qid=1 -f group -n HACMPgroup :rg_move[116] eval RGNAME=epprd_rg :rg_move[1] RGNAME=epprd_rg :rg_move[118] UPDATESTATD=0 :rg_move[119] export UPDATESTATD :rg_move[123] RG_MOVE_EVENT=true :rg_move[123] export RG_MOVE_EVENT :rg_move[128] group_state='$RESGRP_epprd_rg_epprda' :rg_move[129] set +u :rg_move[130] eval print '$RESGRP_epprd_rg_epprda' :rg_move[1] print :rg_move[130] RG_MOVE_ONLINE='' :rg_move[130] export RG_MOVE_ONLINE :rg_move[131] set -u :rg_move[132] RG_MOVE_ONLINE=TMP_ERROR :rg_move[139] rm -f /tmp/.NFSSTOPPED :rg_move[140] rm -f /tmp/.RPCLOCKDSTOPPED :rg_move[147] set -a :rg_move[148] clsetenvgrp epprda rg_move epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 :rg_move[148] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[149] RC=0 :rg_move[150] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" 
SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[1] FORCEDOWN_GROUPS='' :rg_move[2] RESOURCE_GROUPS='' :rg_move[3] HOMELESS_GROUPS='' :rg_move[4] HOMELESS_FOLLOWER_GROUPS='' :rg_move[5] ERRSTATE_GROUPS='' :rg_move[6] PRINCIPAL_ACTIONS='' :rg_move[7] ASSOCIATE_ACTIONS='' :rg_move[8] AUXILLIARY_ACTIONS='' :rg_move[8] SIBLING_GROUPS='' :rg_move[9] SIBLING_NODES_BY_GROUP='' :rg_move[10] SIBLING_ACQUIRING_GROUPS='' :rg_move[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' :rg_move[12] SIBLING_RELEASING_GROUPS='' :rg_move[13] SIBLING_RELEASING_NODES_BY_GROUP='' :rg_move[151] set +a :rg_move[155] (( 0 != 0 )) :rg_move[155] [[ -z epprd_rg ]] :rg_move[164] [[ -z TRUE ]] :rg_move[241] AM_SYNC_CALLED_BY=RG_MOVE :rg_move[241] export AM_SYNC_CALLED_BY :rg_move[242] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-09-30T03:25:14.860848 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=ACQUIRE RESOURCE_GROUPS='"epprd_rg"' PRINCIPAL_ACTION='"ACQUIRE"' AUXILLIARY_ACTION='"NONE"' :process_resources[1] JOB_TYPE=ACQUIRE :process_resources[1] RESOURCE_GROUPS=epprd_rg :process_resources[1] PRINCIPAL_ACTION=ACQUIRE :process_resources[1] AUXILLIARY_ACTION=NONE :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 )) :process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[3360] [[ ACQUIRE == ONLINE ]] +epprd_rg:process_resources[3652] set_resource_group_state ACQUIRING +epprd_rg:process_resources[set_resource_group_state:82] PS4_FUNC=set_resource_group_state +epprd_rg:process_resources[set_resource_group_state:82] typeset PS4_FUNC +epprd_rg:process_resources[set_resource_group_state:83] [[ high == high ]] +epprd_rg:process_resources[set_resource_group_state:83] set -x +epprd_rg:process_resources[set_resource_group_state:84] STAT=0 +epprd_rg:process_resources[set_resource_group_state:85] new_status=ACQUIRING +epprd_rg:process_resources[set_resource_group_state:89] export GROUPNAME +epprd_rg:process_resources[set_resource_group_state:90] [[ ACQUIRING != DOWN ]] +epprd_rg:process_resources[set_resource_group_state:92] clchdaemons -d clstrmgr_scripts -t resource_locator -n epprda -o epprd_rg -v ACQUIRING +epprd_rg:process_resources[set_resource_group_state:100] : Resource Manager Updates +epprd_rg:process_resources[set_resource_group_state:105] amlog_trace '' 'acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:318] clcycle clavailability.log +epprd_rg:process_resources[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:process_resources[amlog_trace:319] cltime +epprd_rg:process_resources[amlog_trace:319] DATE=2023-09-30T03:25:14.897952 +epprd_rg:process_resources[amlog_trace:320] echo 
'|2023-09-30T03:25:14.897952|INFO: acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:process_resources[set_resource_group_state:106] cl_RMupdate acquiring epprd_rg process_resources 2023-09-30T03:25:14.922468 2023-09-30T03:25:14.927706 +epprd_rg:process_resources[set_resource_group_state:153] return 0 +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-09-30T03:25:14.939864 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=WPAR ACTION=ACQUIRE RESOURCE_GROUPS='"epprd_rg' '"' +epprd_rg:process_resources[1] JOB_TYPE=WPAR +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ WPAR == RELEASE ]] +epprd_rg:process_resources[3360] [[ WPAR == ONLINE ]] +epprd_rg:process_resources[3492] process_wpars ACQUIRE +epprd_rg:process_resources[process_wpars:3265] PS4_FUNC=process_wpars +epprd_rg:process_resources[process_wpars:3265] typeset PS4_FUNC +epprd_rg:process_resources[process_wpars:3266] [[ high == high ]] +epprd_rg:process_resources[process_wpars:3266] set -x +epprd_rg:process_resources[process_wpars:3267] STAT=0 +epprd_rg:process_resources[process_wpars:3268] action=ACQUIRE +epprd_rg:process_resources[process_wpars:3268] typeset action +epprd_rg:process_resources[process_wpars:3272] export GROUPNAME +epprd_rg:process_resources[process_wpars:3275] clstart_wpar +epprd_rg:clstart_wpar[180] version=1.12.1.1 +epprd_rg:clstart_wpar[184] [[ rg_move == reconfig_resource_acquire ]] +epprd_rg:clstart_wpar[184] [[ ACQUIRE_PRIMARY == reconfig_resource_acquire ]] +epprd_rg:clstart_wpar[193] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clstart_wpar[193] [[ -z '' ]] +epprd_rg:clstart_wpar[193] exit 0 +epprd_rg:process_resources[process_wpars:3276] RC=0 +epprd_rg:process_resources[process_wpars:3285] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[process_wpars:3294] return 0 +epprd_rg:process_resources[3493] RC=0 +epprd_rg:process_resources[3495] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-09-30T03:25:14.970804 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=SERVICE_LABELS ACTION=ACQUIRE IP_LABELS='"epprd"' RESOURCE_GROUPS='"epprd_rg' '"' COMMUNICATION_LINKS='""' +epprd_rg:process_resources[1] 
JOB_TYPE=SERVICE_LABELS +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] IP_LABELS=epprd +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] COMMUNICATION_LINKS='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ SERVICE_LABELS == RELEASE ]] +epprd_rg:process_resources[3360] [[ SERVICE_LABELS == ONLINE ]] +epprd_rg:process_resources[3407] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[3409] acquire_service_labels +epprd_rg:process_resources[acquire_service_labels:3083] PS4_FUNC=acquire_service_labels +epprd_rg:process_resources[acquire_service_labels:3083] typeset PS4_FUNC +epprd_rg:process_resources[acquire_service_labels:3084] [[ high == high ]] +epprd_rg:process_resources[acquire_service_labels:3084] set -x +epprd_rg:process_resources[acquire_service_labels:3085] STAT=0 +epprd_rg:process_resources[acquire_service_labels:3086] clcallev acquire_service_addr Sep 30 2023 03:25:14 EVENT START: acquire_service_addr |2023-09-30T03:25:15|18770|EVENT START: acquire_service_addr | +epprd_rg:acquire_service_addr[416] version=1.74.1.5 +epprd_rg:acquire_service_addr[423] [[ SERVICE_LABELS != 0 ]] +epprd_rg:acquire_service_addr[423] [[ SERVICE_LABELS != GROUP ]] +epprd_rg:acquire_service_addr[424] PROC_RES=true +epprd_rg:acquire_service_addr[440] saveNSORDER=UNDEFINED +epprd_rg:acquire_service_addr[441] NSORDER=local +epprd_rg:acquire_service_addr[442] export NSORDER +epprd_rg:acquire_service_addr[445] cl_RMupdate resource_acquiring All_service_addrs acquire_service_addr 2023-09-30T03:25:15.053484 2023-09-30T03:25:15.057720 +epprd_rg:acquire_service_addr[452] export GROUPNAME +epprd_rg:acquire_service_addr[458] [[ true == true ]] +epprd_rg:acquire_service_addr[459] get_list_head epprd +epprd_rg:acquire_service_addr[459] read SERVICELABELS +epprd_rg:acquire_service_addr[460] get_list_tail epprd +epprd_rg:acquire_service_addr[460] read IP_LABELS +epprd_rg:acquire_service_addr[471] clgetif -a epprd +epprd_rg:acquire_service_addr[471] 2> /dev/null +epprd_rg:acquire_service_addr[472] (( 3 != 0 )) +epprd_rg:acquire_service_addr[477] cllsif -J '~' -Sn epprd +epprd_rg:acquire_service_addr[477] cut -d~ -f3 +epprd_rg:acquire_service_addr[477] uniq +epprd_rg:acquire_service_addr[477] NETWORK=net_ether_01 +epprd_rg:acquire_service_addr[478] cllsif -J '~' -Si epprda +epprd_rg:acquire_service_addr[478] awk -F~ -v NET=net_ether_01 '{if ($2 == "boot" && $3 == NET) print $1}' +epprd_rg:acquire_service_addr[478] sort +epprd_rg:acquire_service_addr[478] boot_list=epprda +epprd_rg:acquire_service_addr[480] [[ -z epprda ]] +epprd_rg:acquire_service_addr[492] best_boot_addr net_ether_01 epprda +epprd_rg:acquire_service_addr[best_boot_addr:106] NETWORK=net_ether_01 +epprd_rg:acquire_service_addr[best_boot_addr:106] typeset NETWORK +epprd_rg:acquire_service_addr[best_boot_addr:107] shift +epprd_rg:acquire_service_addr[best_boot_addr:108] candidate_boots=epprda +epprd_rg:acquire_service_addr[best_boot_addr:108] typeset candidate_boots +epprd_rg:acquire_service_addr[best_boot_addr:112] echo epprda +epprd_rg:acquire_service_addr[best_boot_addr:112] tr ' ' '\n' 
+epprd_rg:acquire_service_addr[best_boot_addr:112] wc -l
+epprd_rg:acquire_service_addr[best_boot_addr:112] num_candidates=' 1'
+epprd_rg:acquire_service_addr[best_boot_addr:112] typeset -li num_candidates
+epprd_rg:acquire_service_addr[best_boot_addr:113] (( 1 == 1 ))
+epprd_rg:acquire_service_addr[best_boot_addr:114] echo epprda
+epprd_rg:acquire_service_addr[best_boot_addr:115] return
+epprd_rg:acquire_service_addr[492] boot_addr=epprda
+epprd_rg:acquire_service_addr[493] (( 0 != 0 ))
+epprd_rg:acquire_service_addr[505] clgetif -a epprda
+epprd_rg:acquire_service_addr[505] 2> /dev/null
+epprd_rg:acquire_service_addr[505] cut -f1
+epprd_rg:acquire_service_addr[505] INTERFACE='en0 '
+epprd_rg:acquire_service_addr[507] cllsif -J '~' -Sn epprda
+epprd_rg:acquire_service_addr[507] cut -f7,9 -d~
+epprd_rg:acquire_service_addr[508] read boot_dot_addr INTERFACE
+epprd_rg:acquire_service_addr[508] IFS='~'
+epprd_rg:acquire_service_addr[510] [[ -z en0 ]]
+epprd_rg:acquire_service_addr[527] cllsif -J '~' -Sn epprd
+epprd_rg:acquire_service_addr[527] cut -f7,11,15 -d~
+epprd_rg:acquire_service_addr[527] uniq
+epprd_rg:acquire_service_addr[528] read service_dot_addr NETMASK INET_FAMILY
+epprd_rg:acquire_service_addr[528] IFS='~'
+epprd_rg:acquire_service_addr[530] [[ AF_INET == AF_INET6 ]]
+epprd_rg:acquire_service_addr[534] cl_swap_IP_address rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0
+epprd_rg:cl_swap_IP_address[462] version=1.9.14.8
+epprd_rg:cl_swap_IP_address[464] cl_get_path -S
+epprd_rg:cl_swap_IP_address[464] OP_SEP='~'
+epprd_rg:cl_swap_IP_address[465] LC_ALL=C
+epprd_rg:cl_swap_IP_address[465] export LC_ALL
+epprd_rg:cl_swap_IP_address[466] RESTORE_ROUTES=/usr/es/sbin/cluster/.restore_routes
+epprd_rg:cl_swap_IP_address[468] cl_echo 33 'Starting execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0' /usr/es/sbin/cluster/events/utils/cl_swap_IP_address 'rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0'
Sep 30 2023 03:25:15
Starting execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0
+epprd_rg:cl_swap_IP_address[470] typeset -i oslevel
+epprd_rg:cl_swap_IP_address[471] /usr/bin/oslevel -r
+epprd_rg:cl_swap_IP_address[471] /usr/bin/sed s/-//g
+epprd_rg:cl_swap_IP_address[471] oslevel=720005
+epprd_rg:cl_swap_IP_address[476] [[ 6 == 6 ]]
+epprd_rg:cl_swap_IP_address[477] [[ 6 == 7 ]]
+epprd_rg:cl_swap_IP_address[484] no -a
+epprd_rg:cl_swap_IP_address[484] grep ipignoreredirects
+epprd_rg:cl_swap_IP_address[484] awk '{ print $3 }'
+epprd_rg:cl_swap_IP_address[484] PRIOR_IPIGNORE_REDIRECTS_VALUE=0
+epprd_rg:cl_swap_IP_address[485] /usr/sbin/no -o ipignoreredirects=1
Setting ipignoreredirects to 1
+epprd_rg:cl_swap_IP_address[490] PROC_RES=false
+epprd_rg:cl_swap_IP_address[491] [[ SERVICE_LABELS != 0 ]]
+epprd_rg:cl_swap_IP_address[491] [[ SERVICE_LABELS != GROUP ]]
+epprd_rg:cl_swap_IP_address[492] PROC_RES=true
+epprd_rg:cl_swap_IP_address[495] set -u
+epprd_rg:cl_swap_IP_address[497] RC=0
+epprd_rg:cl_swap_IP_address[504] netstat -in
Name  Mtu   Network     Address            Ipkts Ierrs    Opkts Oerrs  Coll
en0   1500  link#2      fa.e6.13.4e.a9.20  16026     0     5417     0     0
en0   1500  61.81.244   61.81.244.134      16026     0     5417     0     0
lo0   16896 link#1                          2495     0     2495     0     0
lo0   16896 127         127.0.0.1           2495     0     2495     0     0
lo0   16896 ::1%1                           2495     0     2495     0     0
+epprd_rg:cl_swap_IP_address[505] netstat -rnC
Routing tables
Destination      Gateway           Flags   Wt  Policy  If   Cost Config_Cost

Route tree for Protocol Family 2 (Internet):
default          61.81.244.1       UG       1    -      en0     0      0
61.81.244.0      61.81.244.134     UHSb     1    -      en0     0      0 =>
61.81.244/24     61.81.244.134     U        1    -      en0     0      0
61.81.244.134    127.0.0.1         UGHS     1    -      lo0     0      0
61.81.244.255    61.81.244.134     UHSb     1    -      en0     0      0
127/8            127.0.0.1         U        1    -      lo0     0      0

Route tree for Protocol Family 24 (Internet v6):
::1%1            ::1%1             UH       1    -      lo0     0      0
+epprd_rg:cl_swap_IP_address[506] CASC_OR_ROT=rotating
+epprd_rg:cl_swap_IP_address[507] ACQ_OR_RLSE=acquire
+epprd_rg:cl_swap_IP_address[508] IF=en0
+epprd_rg:cl_swap_IP_address[509] ADDR=61.81.244.156
+epprd_rg:cl_swap_IP_address[510] OLD_ADDR=61.81.244.134
+epprd_rg:cl_swap_IP_address[511] NETMASK=255.255.255.0
+epprd_rg:cl_swap_IP_address[514] [[ rotating == cascading ]]
+epprd_rg:cl_swap_IP_address[525] cllsif -J '~' -Sw -n 61.81.244.156
+epprd_rg:cl_swap_IP_address[525] cut -f3 -d~
+epprd_rg:cl_swap_IP_address[525] NET=net_ether_01
+epprd_rg:cl_swap_IP_address[528] clodmget -qidentifier=61.81.244.156 -f max_aliases -n HACMPadapter
+epprd_rg:cl_swap_IP_address[528] ALIAS_FIRST=0
+epprd_rg:cl_swap_IP_address[529] grep -c -w inet
+epprd_rg:cl_swap_IP_address[529] ifconfig en0
+epprd_rg:cl_swap_IP_address[529] LC_ALL=C
+epprd_rg:cl_swap_IP_address[529] NUM_ADDRS=1
+epprd_rg:cl_swap_IP_address[530] [[ acquire == acquire ]]
+epprd_rg:cl_swap_IP_address[533] amlog_trace '' 'Aliasing Service IP|61.81.244.156'
+epprd_rg:cl_swap_IP_address[amlog_trace:318] clcycle clavailability.log
+epprd_rg:cl_swap_IP_address[amlog_trace:318] 1> /dev/null 2>& 1
+epprd_rg:cl_swap_IP_address[amlog_trace:319] cltime
+epprd_rg:cl_swap_IP_address[amlog_trace:319] DATE=2023-09-30T03:25:15.328649
+epprd_rg:cl_swap_IP_address[amlog_trace:320] echo '|2023-09-30T03:25:15.328649|INFO: Aliasing Service IP|61.81.244.156'
+epprd_rg:cl_swap_IP_address[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
+epprd_rg:cl_swap_IP_address[535] cl_echo 7310 'cl_swap_IP_address: Configuring network interface en0 with aliased IP address 61.81.244.156' cl_swap_IP_address en0 61.81.244.156
Sep 30 2023 03:25:15
cl_swap_IP_address: Configuring network interface en0 with aliased IP address 61.81.244.156
+epprd_rg:cl_swap_IP_address[546] (( 1 > 1 ))
+epprd_rg:cl_swap_IP_address[550] clifconfig en0 alias 61.81.244.156 netmask 255.255.255.0 firstalias
+epprd_rg:clifconfig[117] version=1.9
+epprd_rg:clifconfig[121] set -A args en0 alias 61.81.244.156 netmask 255.255.255.0 firstalias
+epprd_rg:clifconfig[124] interface=en0
+epprd_rg:clifconfig[125] shift
+epprd_rg:clifconfig[127] [[ -n alias ]]
+epprd_rg:clifconfig[129] alias_val=1
+epprd_rg:clifconfig[167] shift
+epprd_rg:clifconfig[127] [[ -n 61.81.244.156 ]]
+epprd_rg:clifconfig[147] params=' address=61.81.244.156'
+epprd_rg:clifconfig[147] addr=61.81.244.156
+epprd_rg:clifconfig[167] shift
+epprd_rg:clifconfig[127] [[ -n netmask ]]
+epprd_rg:clifconfig[149] params=' address=61.81.244.156 netmask=255.255.255.0'
+epprd_rg:clifconfig[149] shift
+epprd_rg:clifconfig[167] shift
+epprd_rg:clifconfig[127] [[ -n firstalias ]]
+epprd_rg:clifconfig[167] shift
+epprd_rg:clifconfig[127] [[ -n '' ]]
+epprd_rg:clifconfig[174] [[ -n 1 ]]
+epprd_rg:clifconfig[174] [[ -n epprd_rg ]]
+epprd_rg:clifconfig[175] clwparname epprd_rg
+epprd_rg:clwparname[38] version=1.3.1.1
+epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource
+epprd_rg:clwparname[44] [[ -z '' ]]
+epprd_rg:clwparname[44] exit 0
+epprd_rg:clifconfig[175] WPARNAME=''
+epprd_rg:clifconfig[176] (( 0 == 0 ))
+epprd_rg:clifconfig[176] [[ -n '' ]]
+epprd_rg:clifconfig[218] belongs_to_an_active_wpar 61.81.244.156
+epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource
+epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]]
+epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1
+epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast
+epprd_rg:clifconfig[218] IFS='~'
+epprd_rg:clifconfig[219] rc=1
+epprd_rg:clifconfig[221] [[ 1 == 0 ]]
+epprd_rg:clifconfig[275] ifconfig en0 alias 61.81.244.156 netmask 255.255.255.0 firstalias
+epprd_rg:cl_swap_IP_address[584] hats_adapter_notify en0 -e 61.81.244.156 alias
2023-09-30T03:25:15.384590 hats_adapter_notify
2023-09-30T03:25:15.386854 hats_adapter_notify
+epprd_rg:cl_swap_IP_address[587] check_alias_status en0 61.81.244.156 acquire
+epprd_rg:cl_swap_IP_address[check_alias_status:108] CH_INTERFACE=en0
+epprd_rg:cl_swap_IP_address[check_alias_status:109] CH_ADDRESS=61.81.244.156
+epprd_rg:cl_swap_IP_address[check_alias_status:110] CH_ACQ_OR_RLSE=acquire
+epprd_rg:cl_swap_IP_address[check_alias_status:118] IF_IB=en0
+epprd_rg:cl_swap_IP_address[check_alias_status:120] echo en0
+epprd_rg:cl_swap_IP_address[check_alias_status:120] awk '{print index($0, "ib")}'
+epprd_rg:cl_swap_IP_address[check_alias_status:120] IS_IB=0
+epprd_rg:cl_swap_IP_address[check_alias_status:122] [[ 0 != 1 ]]
+epprd_rg:cl_swap_IP_address[check_alias_status:124] clifconfig en0
+epprd_rg:cl_swap_IP_address[check_alias_status:124] fgrep -w 61.81.244.156
+epprd_rg:clifconfig[117] version=1.9
+epprd_rg:clifconfig[121] set -A args en0
+epprd_rg:clifconfig[124] interface=en0
+epprd_rg:clifconfig[125] shift
+epprd_rg:clifconfig[127] [[ -n '' ]]
+epprd_rg:clifconfig[174] [[ -n '' ]]
+epprd_rg:clifconfig[218] belongs_to_an_active_wpar
+epprd_rg:clifconfig[belongs_to_an_active_wpar:63] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource
+epprd_rg:cl_swap_IP_address[check_alias_status:124] awk '{print $2}'
+epprd_rg:clifconfig[belongs_to_an_active_wpar:63] [[ -z '' ]]
+epprd_rg:clifconfig[belongs_to_an_active_wpar:63] return 1
+epprd_rg:clifconfig[218] read wpar_name wpar_if wpar_netmask wpar_broadcast
+epprd_rg:clifconfig[218] IFS='~'
+epprd_rg:clifconfig[219] rc=1
+epprd_rg:clifconfig[221] [[ 1 == 0 ]]
+epprd_rg:clifconfig[275] ifconfig en0
+epprd_rg:cl_swap_IP_address[check_alias_status:124] ADDR=61.81.244.156
+epprd_rg:cl_swap_IP_address[check_alias_status:129] [ acquire = acquire ]
+epprd_rg:cl_swap_IP_address[check_alias_status:133] [[ 61.81.244.156 != 61.81.244.156 ]]
+epprd_rg:cl_swap_IP_address[check_alias_status:144] return 0
+epprd_rg:cl_swap_IP_address[588] RC=0
+epprd_rg:cl_swap_IP_address[590] [[ 0 != 0 ]]
+epprd_rg:cl_swap_IP_address[594] amlog_trace '' 'Aliasing Service IP|61.81.244.156'
+epprd_rg:cl_swap_IP_address[amlog_trace:318] clcycle clavailability.log
+epprd_rg:cl_swap_IP_address[amlog_trace:318] 1> /dev/null 2>& 1
+epprd_rg:cl_swap_IP_address[amlog_trace:319] cltime
+epprd_rg:cl_swap_IP_address[amlog_trace:319] DATE=2023-09-30T03:25:15.452708
+epprd_rg:cl_swap_IP_address[amlog_trace:320] echo '|2023-09-30T03:25:15.452708|INFO: Aliasing Service IP|61.81.244.156'
+epprd_rg:cl_swap_IP_address[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log
+epprd_rg:cl_swap_IP_address[701] [[ 0 != 0 ]]
+epprd_rg:cl_swap_IP_address[714] flush_arp
+epprd_rg:cl_swap_IP_address[flush_arp:49] arp -an
+epprd_rg:cl_swap_IP_address[flush_arp:49] grep '\?'
+epprd_rg:cl_swap_IP_address[flush_arp:49] tr -d '()'
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.224
61.81.244.224 (61.81.244.224) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.239
61.81.244.239 (61.81.244.239) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.246
61.81.244.246 (61.81.244.246) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.123
61.81.244.123 (61.81.244.123) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.145
61.81.244.145 (61.81.244.145) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.146
61.81.244.146 (61.81.244.146) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:50] arp -d 61.81.244.1
61.81.244.1 (61.81.244.1) deleted
+epprd_rg:cl_swap_IP_address[flush_arp:49] read host addr other
+epprd_rg:cl_swap_IP_address[flush_arp:52] return 0
+epprd_rg:cl_swap_IP_address[716] netstat -in
Name  Mtu   Network     Address            Ipkts Ierrs    Opkts Oerrs  Coll
en0   1500  link#2      fa.e6.13.4e.a9.20  16130     0     5554     0     0
en0   1500  61.81.244   61.81.244.156      16130     0     5554     0     0
en0   1500  61.81.244   61.81.244.134      16130     0     5554     0     0
lo0   16896 link#1                          2508     0     2508     0     0
lo0   16896 127         127.0.0.1           2508     0     2508     0     0
lo0   16896 ::1%1                           2508     0     2508     0     0
+epprd_rg:cl_swap_IP_address[717] netstat -rnC
Routing tables
Destination      Gateway           Flags   Wt  Policy  If   Cost Config_Cost

Route tree for Protocol Family 2 (Internet):
default          61.81.244.1       UG       1    -      en0     0      0
61.81.244.0      61.81.244.156     UHSb     1    -      en0     0      0 =>
61.81.244/24     61.81.244.156     U        1    -      en0     0      0
61.81.244.134    127.0.0.1         UGHS     1    -      lo0     0      0
61.81.244.156    127.0.0.1         UGHS     1    -      lo0     0      0
61.81.244.255    61.81.244.156     UHSb     1    -      en0     0      0
127/8            127.0.0.1         U        1    -      lo0     0      0

Route tree for Protocol Family 24 (Internet v6):
::1%1            ::1%1             UH       1    -      lo0     0      0
+epprd_rg:cl_swap_IP_address[989] no -o ipignoreredirects=0
Setting ipignoreredirects to 0
+epprd_rg:cl_swap_IP_address[992] cl_echo 32 'Completed execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0. Exit status = 0' /usr/es/sbin/cluster/events/utils/cl_swap_IP_address 'rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0' 0
Sep 30 2023 03:25:15
Completed execution of /usr/es/sbin/cluster/events/utils/cl_swap_IP_address with parameters rotating acquire en0 61.81.244.156 61.81.244.134 255.255.255.0. Exit status = 0
+epprd_rg:cl_swap_IP_address[994] date
Sat Sep 30 03:25:15 KORST 2023
+epprd_rg:cl_swap_IP_address[996] exit 0
+epprd_rg:acquire_service_addr[537] RC=0
+epprd_rg:acquire_service_addr[539] (( 0 != 0 ))
+epprd_rg:acquire_service_addr[549] [[ true == false ]]
+epprd_rg:acquire_service_addr[560] cl_RMupdate resource_up All_nonerror_service_addrs acquire_service_addr
2023-09-30T03:25:15.528125
2023-09-30T03:25:15.532843
+epprd_rg:acquire_service_addr[565] [[ UNDEFINED != UNDEFINED ]]
+epprd_rg:acquire_service_addr[568] NSORDER=''
+epprd_rg:acquire_service_addr[568] export NSORDER
+epprd_rg:acquire_service_addr[571] [[ true == false ]]
+epprd_rg:acquire_service_addr[579] exit 0
Sep 30 2023 03:25:15 EVENT COMPLETED: acquire_service_addr 0
|2023-09-30T03:25:15|18770|EVENT COMPLETED: acquire_service_addr 0|
+epprd_rg:process_resources[acquire_service_labels:3087] RC=0
+epprd_rg:process_resources[acquire_service_labels:3089] (( 0 != 0 && 0 != 11 ))
+epprd_rg:process_resources[acquire_service_labels:3104] (( 0 != 0 ))
+epprd_rg:process_resources[acquire_service_labels:3110] refresh -s clcomd
0513-095 The request for subsystem refresh was completed successfully.
+epprd_rg:process_resources[acquire_service_labels:3112] return 0
+epprd_rg:process_resources[3324] true
+epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next
+epprd_rg:process_resources[3328] set -a
+epprd_rg:process_resources[3329] clRGPA
+epprd_rg:clRGPA[+47] [[ high = high ]]
+epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$
+epprd_rg:clRGPA[+49] usingVer=clrgpa
+epprd_rg:clRGPA[+54] clrgpa
2023-09-30T03:25:15.608979 clrgpa
+epprd_rg:clRGPA[+55] exit 0
+epprd_rg:process_resources[3329] eval JOB_TYPE=DISKS ACTION=ACQUIRE HDISKS='"hdisk2,hdisk3,hdisk4,hdisk5,hdisk6,hdisk7,hdisk8"' RESOURCE_GROUPS='"epprd_rg' '"' VOLUME_GROUPS='"datavg,datavg,datavg,datavg,datavg,datavg,datavg"'
+epprd_rg:process_resources[1] JOB_TYPE=DISKS
+epprd_rg:process_resources[1] ACTION=ACQUIRE
+epprd_rg:process_resources[1] HDISKS=hdisk2,hdisk3,hdisk4,hdisk5,hdisk6,hdisk7,hdisk8
+epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg '
+epprd_rg:process_resources[1] VOLUME_GROUPS=datavg,datavg,datavg,datavg,datavg,datavg,datavg
+epprd_rg:process_resources[3330] RC=0
+epprd_rg:process_resources[3331] set +a
+epprd_rg:process_resources[3333] (( 0 != 0 ))
+epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg
+epprd_rg:process_resources[3343] GROUPNAME=epprd_rg
+epprd_rg:process_resources[3343] export GROUPNAME
+epprd_rg:process_resources[3353] IS_SERVICE_START=1
+epprd_rg:process_resources[3354] IS_SERVICE_STOP=1
+epprd_rg:process_resources[3360] [[ DISKS == RELEASE ]]
+epprd_rg:process_resources[3360] [[ DISKS == ONLINE ]]
+epprd_rg:process_resources[3439] [[ ACQUIRE == ACQUIRE ]]
+epprd_rg:process_resources[3441] FAILED_RR_RGS=''
+epprd_rg:process_resources[3442] get_disks_main
+epprd_rg:process_resources[get_disks_main:981] PS4_FUNC=get_disks_main
+epprd_rg:process_resources[get_disks_main:981] typeset PS4_FUNC
+epprd_rg:process_resources[get_disks_main:982] [[ high == high ]]
+epprd_rg:process_resources[get_disks_main:982] set -x
+epprd_rg:process_resources[get_disks_main:983] SKIPBRKRES=0
+epprd_rg:process_resources[get_disks_main:983] typeset -li SKIPBRKRES
+epprd_rg:process_resources[get_disks_main:984] STAT=0
+epprd_rg:process_resources[get_disks_main:985] FAILURE_IN_METHOD=0
+epprd_rg:process_resources[get_disks_main:985] typeset -li FAILURE_IN_METHOD
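The service-address acquisition traced above reduces to three steps: alias the address onto the interface, verify the interface now carries it, and flush cached ARP entries so peers relearn the address-to-MAC mapping. A condensed ksh sketch of that sequence (illustrative variable names, not the shipped cl_swap_IP_address source):

    IF=en0                                  # values taken from the trace above
    ADDR=61.81.244.156
    NETMASK=255.255.255.0

    # 1. Add the service IP as an alias; firstalias makes it the primary address.
    ifconfig $IF alias $ADDR netmask $NETMASK firstalias

    # 2. Verify the interface now answers to the address.
    if ! ifconfig $IF | fgrep -w $ADDR > /dev/null
    then
        echo "alias $ADDR did not appear on $IF" >&2
        exit 1
    fi

    # 3. Flush dynamic ARP entries so neighbors relearn the new mapping.
    arp -an | grep '\?' | tr -d '()' | while read host addr other
    do
        arp -d $addr
    done
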
+epprd_rg:process_resources[get_disks_main:986] LIST_OF_FAILED_RGS='' +epprd_rg:process_resources[get_disks_main:989] : Below are the list of resources as generated by clrgpa +epprd_rg:process_resources[get_disks_main:991] RG_LIST=epprd_rg +epprd_rg:process_resources[get_disks_main:992] RDISK_LIST='' +epprd_rg:process_resources[get_disks_main:993] DISK_LIST=hdisk2,hdisk3,hdisk4,hdisk5,hdisk6,hdisk7,hdisk8 +epprd_rg:process_resources[get_disks_main:994] VG_LIST=datavg,datavg,datavg,datavg,datavg,datavg,datavg +epprd_rg:process_resources[get_disks_main:997] : Resource groups are processed individually. This is required because +epprd_rg:process_resources[get_disks_main:998] : the replication mechanism may differ between resource groups. +epprd_rg:process_resources[get_disks_main:1002] getReplicatedResources epprd_rg +epprd_rg:process_resources[getReplicatedResources:699] PS4_FUNC=getReplicatedResources +epprd_rg:process_resources[getReplicatedResources:699] typeset PS4_FUNC +epprd_rg:process_resources[getReplicatedResources:700] [[ high == high ]] +epprd_rg:process_resources[getReplicatedResources:700] set -x +epprd_rg:process_resources[getReplicatedResources:702] RV=false +epprd_rg:process_resources[getReplicatedResources:704] clodmget -n -f type HACMPrresmethods +epprd_rg:process_resources[getReplicatedResources:704] [[ -n 9 ]] +epprd_rg:process_resources[getReplicatedResources:707] : Replicated resource methods are defined, check for resources +epprd_rg:process_resources[getReplicatedResources:709] clodmget -q $'name like \'*_REP_RESOURCE\' AND group=epprd_rg' -f value -n HACMPresource +epprd_rg:process_resources[getReplicatedResources:709] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:718] : Verify if any backup profiles are configured and trigger cbm utilities based on that +epprd_rg:process_resources[getReplicatedResources:720] clodmget -q name=BACKUP_ENABLED -f value HACMPresource +epprd_rg:process_resources[getReplicatedResources:720] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:739] echo false +epprd_rg:process_resources[get_disks_main:1002] REPLICATED_RESOURCES=false +epprd_rg:process_resources[get_disks_main:1005] : Break out the resources for resource group epprd_rg +epprd_rg:process_resources[get_disks_main:1007] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[get_disks_main:1008] VOLUME_GROUPS='' +epprd_rg:process_resources[get_disks_main:1009] HDISKS='' +epprd_rg:process_resources[get_disks_main:1010] RHDISKS='' +epprd_rg:process_resources[get_disks_main:1011] RDISK_LIST='' +epprd_rg:process_resources[get_disks_main:1014] : Get the volume groups in resource group epprd_rg +epprd_rg:process_resources[get_disks_main:1016] print datavg,datavg,datavg,datavg,datavg,datavg,datavg +epprd_rg:process_resources[get_disks_main:1016] read VOLUME_GROUPS VG_LIST +epprd_rg:process_resources[get_disks_main:1016] IFS=: +epprd_rg:process_resources[get_disks_main:1018] : Removing duplicate entries in VG list. 
+epprd_rg:process_resources[get_disks_main:1020] echo datavg,datavg,datavg,datavg,datavg,datavg,datavg +epprd_rg:process_resources[get_disks_main:1020] tr , '\n' +epprd_rg:process_resources[get_disks_main:1020] xargs +epprd_rg:process_resources[get_disks_main:1020] sort -u +epprd_rg:process_resources[get_disks_main:1020] VOLUME_GROUPS=datavg +epprd_rg:process_resources[get_disks_main:1022] : Get the disks corresponding to these volume groups +epprd_rg:process_resources[get_disks_main:1024] print hdisk2,hdisk3,hdisk4,hdisk5,hdisk6,hdisk7,hdisk8 +epprd_rg:process_resources[get_disks_main:1024] read HDISKS DISK_LIST +epprd_rg:process_resources[get_disks_main:1024] IFS=: +epprd_rg:process_resources[get_disks_main:1025] HDISKS='hdisk2 hdisk3 hdisk4 hdisk5 hdisk6 hdisk7 hdisk8' +epprd_rg:process_resources[get_disks_main:1031] : Pick up any raw disks not returned by clrgpa +epprd_rg:process_resources[get_disks_main:1033] clodmget -q group='epprd_rg AND name=RAW_DISK' HACMPresource +epprd_rg:process_resources[get_disks_main:1033] [[ -n '' ]] +epprd_rg:process_resources[get_disks_main:1042] : Get any raw disks in resource group epprd_rg +epprd_rg:process_resources[get_disks_main:1045] print +epprd_rg:process_resources[get_disks_main:1045] read RHDISKS RDISK_LIST +epprd_rg:process_resources[get_disks_main:1045] IFS=: +epprd_rg:process_resources[get_disks_main:1046] RHDISKS='' +epprd_rg:process_resources[get_disks_main:1047] print datavg +epprd_rg:process_resources[get_disks_main:1047] read VOLUME_GROUPS +epprd_rg:process_resources[get_disks_main:1051] : At this point, the global variables below should be set to +epprd_rg:process_resources[get_disks_main:1052] : the values associated with resource group epprd_rg +epprd_rg:process_resources[get_disks_main:1054] export RESOURCE_GROUPS +epprd_rg:process_resources[get_disks_main:1055] export VOLUME_GROUPS +epprd_rg:process_resources[get_disks_main:1056] export HDISKS +epprd_rg:process_resources[get_disks_main:1057] export RHDISKS +epprd_rg:process_resources[get_disks_main:1059] [[ false == true ]] +epprd_rg:process_resources[get_disks_main:1182] get_disks +epprd_rg:process_resources[get_disks:1198] PS4_FUNC=get_disks +epprd_rg:process_resources[get_disks:1198] typeset PS4_FUNC +epprd_rg:process_resources[get_disks:1199] [[ high == high ]] +epprd_rg:process_resources[get_disks:1199] set -x +epprd_rg:process_resources[get_disks:1201] STAT=0 +epprd_rg:process_resources[get_disks:1204] : Most volume groups are Enhanced Concurrent Mode, and it should +epprd_rg:process_resources[get_disks:1205] : not be necessary to break reserves. If all the volume groups +epprd_rg:process_resources[get_disks:1206] : are ECM, we should be able to skip breaking reserves. If it +epprd_rg:process_resources[get_disks:1207] : turns out that there is a reserve on a disk in an ECM volume +epprd_rg:process_resources[get_disks:1208] : group, that will be handled by cl_pvo making an explicit call +epprd_rg:process_resources[get_disks:1209] : to cl_disk_available. 
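The check that follows implements exactly this: a volume group is Enhanced Concurrent Mode when its conc_capable attribute in CuAt is y, and if every VG qualifies, reserve breaking is skipped. A minimal ksh sketch, assuming $VOLUME_GROUPS holds the comma-separated list built above:

    all_ecm=TRUE
    for vg in $(echo $VOLUME_GROUPS | tr ',' '\n' | sort -u)
    do
        # conc_capable=y in CuAt marks an Enhanced Concurrent Mode VG
        if [[ $(clodmget -q "name = $vg and attribute = conc_capable" -f value -n CuAt) != y ]]
        then
            all_ecm=FALSE
        fi
    done
    [[ $all_ecm == TRUE ]] && return 0      # all ECM: no reserves to break
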
+epprd_rg:process_resources[get_disks:1213] all_ecm=TRUE +epprd_rg:process_resources[get_disks:1214] IFS=: +epprd_rg:process_resources[get_disks:1214] set -- datavg +epprd_rg:process_resources[get_disks:1214] print datavg +epprd_rg:process_resources[get_disks:1216] print datavg +epprd_rg:process_resources[get_disks:1216] sort -u +epprd_rg:process_resources[get_disks:1216] tr , '\n' +epprd_rg:process_resources[get_disks:1218] clodmget -q 'name = datavg and attribute = conc_capable' -f value -n CuAt +epprd_rg:process_resources[get_disks:1218] [[ y != y ]] +epprd_rg:process_resources[get_disks:1224] [[ TRUE == FALSE ]] +epprd_rg:process_resources[get_disks:1226] [[ TRUE == TRUE ]] +epprd_rg:process_resources[get_disks:1226] return 0 +epprd_rg:process_resources[get_disks_main:1183] STAT=0 +epprd_rg:process_resources[get_disks_main:1186] return 0 +epprd_rg:process_resources[3443] tr ' ' '\n' +epprd_rg:process_resources[3443] echo +epprd_rg:process_resources[3443] FAILED_RR_RGS='' +epprd_rg:process_resources[3444] [[ -n '' ]] +epprd_rg:process_resources[3450] clodmget -n -q policy=scsi -f value HACMPsplitmerge +epprd_rg:process_resources[3450] SCSIPR_ENABLED='' +epprd_rg:process_resources[3450] typeset SCSIPR_ENABLED +epprd_rg:process_resources[3451] [[ '' == Yes ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-09-30T03:25:15.687660 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=VGS ACTION=ACQUIRE CONCURRENT_VOLUME_GROUP='""' VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' EXPORT_FILESYSTEM='""' +epprd_rg:process_resources[1] JOB_TYPE=VGS +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] CONCURRENT_VOLUME_GROUP='' +epprd_rg:process_resources[1] VOLUME_GROUPS=datavg +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] EXPORT_FILESYSTEM='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ VGS == RELEASE ]] +epprd_rg:process_resources[3360] [[ VGS == ONLINE ]] +epprd_rg:process_resources[3571] process_volume_groups_main ACQUIRE +epprd_rg:process_resources[process_volume_groups_main:2293] PS4_FUNC=process_volume_groups_main +epprd_rg:process_resources[process_volume_groups_main:2293] typeset PS4_FUNC +epprd_rg:process_resources[process_volume_groups_main:2294] [[ high == high ]] +epprd_rg:process_resources[process_volume_groups_main:2294] set -x +epprd_rg:process_resources[process_volume_groups_main:2295] DEF_VARYON_ACTION=0 +epprd_rg:process_resources[process_volume_groups_main:2295] typeset -li DEF_VARYON_ACTION +epprd_rg:process_resources[process_volume_groups_main:2296] FAILURE_IN_METHOD=0 +epprd_rg:process_resources[process_volume_groups_main:2296] typeset -li FAILURE_IN_METHOD +epprd_rg:process_resources[process_volume_groups_main:2297] 
ACTION=ACQUIRE +epprd_rg:process_resources[process_volume_groups_main:2297] typeset ACTION +epprd_rg:process_resources[process_volume_groups_main:2298] STAT=0 +epprd_rg:process_resources[process_volume_groups_main:2299] VG_LIST=datavg +epprd_rg:process_resources[process_volume_groups_main:2300] RG_LIST=epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2304] getReplicatedResources epprd_rg +epprd_rg:process_resources[getReplicatedResources:699] PS4_FUNC=getReplicatedResources +epprd_rg:process_resources[getReplicatedResources:699] typeset PS4_FUNC +epprd_rg:process_resources[getReplicatedResources:700] [[ high == high ]] +epprd_rg:process_resources[getReplicatedResources:700] set -x +epprd_rg:process_resources[getReplicatedResources:702] RV=false +epprd_rg:process_resources[getReplicatedResources:704] clodmget -n -f type HACMPrresmethods +epprd_rg:process_resources[getReplicatedResources:704] [[ -n 9 ]] +epprd_rg:process_resources[getReplicatedResources:707] : Replicated resource methods are defined, check for resources +epprd_rg:process_resources[getReplicatedResources:709] clodmget -q $'name like \'*_REP_RESOURCE\' AND group=epprd_rg' -f value -n HACMPresource +epprd_rg:process_resources[getReplicatedResources:709] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:718] : Verify if any backup profiles are configured and trigger cbm utilities based on that +epprd_rg:process_resources[getReplicatedResources:720] clodmget -q name=BACKUP_ENABLED -f value HACMPresource +epprd_rg:process_resources[getReplicatedResources:720] [[ -n '' ]] +epprd_rg:process_resources[getReplicatedResources:739] echo false +epprd_rg:process_resources[process_volume_groups_main:2304] REPLICATED_RESOURCES=false +epprd_rg:process_resources[process_volume_groups_main:2305] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2306] print -- datavg +epprd_rg:process_resources[process_volume_groups_main:2306] read VOLUME_GROUPS VG_LIST +epprd_rg:process_resources[process_volume_groups_main:2306] IFS=: +epprd_rg:process_resources[process_volume_groups_main:2307] VOLUME_GROUPS=datavg +epprd_rg:process_resources[process_volume_groups_main:2310] : At this point, these variables contain information only for epprd_rg +epprd_rg:process_resources[process_volume_groups_main:2312] export VOLUME_GROUPS +epprd_rg:process_resources[process_volume_groups_main:2313] export RESOURCE_GROUPS +epprd_rg:process_resources[process_volume_groups_main:2315] [[ false == true ]] +epprd_rg:process_resources[process_volume_groups_main:2555] process_volume_groups ACQUIRE +epprd_rg:process_resources[process_volume_groups:2571] PS4_FUNC=process_volume_groups +epprd_rg:process_resources[process_volume_groups:2571] typeset PS4_FUNC +epprd_rg:process_resources[process_volume_groups:2572] [[ high == high ]] +epprd_rg:process_resources[process_volume_groups:2572] set -x +epprd_rg:process_resources[process_volume_groups:2573] STAT=0 +epprd_rg:process_resources[process_volume_groups:2575] GROUPNAME=epprd_rg +epprd_rg:process_resources[process_volume_groups:2575] export GROUPNAME +epprd_rg:process_resources[process_volume_groups:2578] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[process_volume_groups:2581] : Varyon the VGs in the environment +epprd_rg:process_resources[process_volume_groups:2583] cl_activate_vgs -n +epprd_rg:cl_activate_vgs[213] [[ high == high ]] +epprd_rg:cl_activate_vgs[213] version=1.46 +epprd_rg:cl_activate_vgs[215] STATUS=0 +epprd_rg:cl_activate_vgs[215] typeset -li STATUS 
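A sketch of the per-VG loop cl_activate_vgs runs just below (vgs_list calling vgs_chk), with illustrative variable names: volume groups that lsvg already reports online are skipped, and each remaining one is varied on in the foreground through clvaryonvg.

    VGSTATUS=$(lsvg -L -o)                  # VGs already varied on
    for vg in $LIST_OF_VOLUME_GROUPS_FOR_RG
    do
        # skip a VG already online (whole-word match against lsvg output)
        [[ $VGSTATUS == @(?(*\ )$vg?(\ *)) ]] && continue
        clvaryonvg -n $vg                   # -n: do not sync stale partitions here
    done
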
+epprd_rg:cl_activate_vgs[216] SYNCFLAG='' +epprd_rg:cl_activate_vgs[217] CLENV='' +epprd_rg:cl_activate_vgs[218] TMP_FILENAME=/tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs[219] USE_OEM_METHODS=false +epprd_rg:cl_activate_vgs[221] PROC_RES=false +epprd_rg:cl_activate_vgs[225] [[ VGS != 0 ]] +epprd_rg:cl_activate_vgs[225] [[ VGS != GROUP ]] +epprd_rg:cl_activate_vgs[226] PROC_RES=true +epprd_rg:cl_activate_vgs[232] [[ -n == -n ]] +epprd_rg:cl_activate_vgs[234] SYNCFLAG=-n +epprd_rg:cl_activate_vgs[235] shift +epprd_rg:cl_activate_vgs[240] (( 0 != 0 )) +epprd_rg:cl_activate_vgs[247] set -u +epprd_rg:cl_activate_vgs[250] rm -f /tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs[254] lsvg -L -o +epprd_rg:cl_activate_vgs[254] print caavg_private rootvg +epprd_rg:cl_activate_vgs[254] VGSTATUS='caavg_private rootvg' +epprd_rg:cl_activate_vgs[257] ALLVGS=All_volume_groups +epprd_rg:cl_activate_vgs[258] cl_RMupdate resource_acquiring All_volume_groups cl_activate_vgs 2023-09-30T03:25:15.760208 2023-09-30T03:25:15.764707 +epprd_rg:cl_activate_vgs[262] [[ true == false ]] +epprd_rg:cl_activate_vgs[285] LIST_OF_VOLUME_GROUPS_FOR_RG='' +epprd_rg:cl_activate_vgs[289] export GROUPNAME +epprd_rg:cl_activate_vgs[291] echo datavg +epprd_rg:cl_activate_vgs[291] read LIST_OF_VOLUME_GROUPS_FOR_RG VOLUME_GROUPS +epprd_rg:cl_activate_vgs[291] IFS=: +epprd_rg:cl_activate_vgs[294] echo datavg +epprd_rg:cl_activate_vgs[296] sort -u +epprd_rg:cl_activate_vgs[295] tr , '\n' +epprd_rg:cl_activate_vgs[294] LIST_OF_VOLUME_GROUPS_FOR_RG=datavg +epprd_rg:cl_activate_vgs[298] vgs_list datavg +epprd_rg:cl_activate_vgs[vgs_list:178] PS4_LOOP='' +epprd_rg:cl_activate_vgs[vgs_list:178] typeset PS4_LOOP +epprd_rg:cl_activate_vgs:datavg[vgs_list:182] PS4_LOOP=datavg +epprd_rg:cl_activate_vgs:datavg[vgs_list:186] [[ 'caavg_private rootvg' == @(?(*\ )datavg?(\ *)) ]] +epprd_rg:cl_activate_vgs:datavg[vgs_list:192] : call varyon for the volume group in Foreground +epprd_rg:cl_activate_vgs:datavg[vgs_list:194] vgs_chk datavg -n cl_activate_vgs +epprd_rg:cl_activate_vgs(0.053):datavg[vgs_chk:78] VG=datavg +epprd_rg:cl_activate_vgs(0.053):datavg[vgs_chk:78] typeset VG +epprd_rg:cl_activate_vgs(0.053):datavg[vgs_chk:79] SYNCFLAG=-n +epprd_rg:cl_activate_vgs(0.053):datavg[vgs_chk:79] typeset SYNCFLAG +epprd_rg:cl_activate_vgs(0.053):datavg[vgs_chk:80] PROGNAME=cl_activate_vgs +epprd_rg:cl_activate_vgs(0.053):datavg[vgs_chk:80] typeset PROGNAME +epprd_rg:cl_activate_vgs(0.053):datavg[vgs_chk:81] STATUS=0 +epprd_rg:cl_activate_vgs(0.053):datavg[vgs_chk:81] typeset -li STATUS +epprd_rg:cl_activate_vgs(0.053):datavg[vgs_chk:83] [[ -n '' ]] +epprd_rg:cl_activate_vgs(0.053):datavg[vgs_chk:100] amlog_trace '' 'Activating Volume Group|datavg' +epprd_rg:cl_activate_vgs(0.053):datavg[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_vgs(0.054):datavg[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_vgs(0.078):datavg[amlog_trace:319] cltime +epprd_rg:cl_activate_vgs(0.081):datavg[amlog_trace:319] DATE=2023-09-30T03:25:15.802203 +epprd_rg:cl_activate_vgs(0.081):datavg[amlog_trace:320] echo '|2023-09-30T03:25:15.802203|INFO: Activating Volume Group|datavg' +epprd_rg:cl_activate_vgs(0.081):datavg[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_vgs(0.081):datavg[vgs_chk:102] typeset -x ERRMSG +epprd_rg:cl_activate_vgs(0.081):datavg[vgs_chk:103] clvaryonvg -n datavg +epprd_rg:clvaryonvg(0.009):datavg[985] version=1.21.7.22 +epprd_rg:clvaryonvg(0.009):datavg[989] : Without this test, cause of 
failure due to non-root may not be obvious +epprd_rg:clvaryonvg(0.009):datavg[991] [[ -z '' ]] +epprd_rg:clvaryonvg(0.009):datavg[991] id -nu +epprd_rg:clvaryonvg(0.010):datavg[991] 2> /dev/null +epprd_rg:clvaryonvg(0.012):datavg[991] user_name=root +epprd_rg:clvaryonvg(0.012):datavg[994] : Check if RBAC is enabled +epprd_rg:clvaryonvg(0.012):datavg[996] is_rbac_enabled='' +epprd_rg:clvaryonvg(0.012):datavg[996] typeset is_rbac_enabled +epprd_rg:clvaryonvg(0.012):datavg[997] clodmget -nq group='LDAPClient and name=RBACConfig' -f value HACMPLDAP +epprd_rg:clvaryonvg(0.013):datavg[997] 2> /dev/null +epprd_rg:clvaryonvg(0.016):datavg[997] is_rbac_enabled='' +epprd_rg:clvaryonvg(0.016):datavg[999] role='' +epprd_rg:clvaryonvg(0.016):datavg[999] typeset role +epprd_rg:clvaryonvg(0.016):datavg[1000] [[ root != root ]] +epprd_rg:clvaryonvg(0.016):datavg[1009] LEAVEOFF=FALSE +epprd_rg:clvaryonvg(0.016):datavg[1010] FORCEON='' +epprd_rg:clvaryonvg(0.016):datavg[1011] FORCEUPD=FALSE +epprd_rg:clvaryonvg(0.016):datavg[1012] NOQUORUM=20 +epprd_rg:clvaryonvg(0.016):datavg[1013] MISSING_UPDATES=30 +epprd_rg:clvaryonvg(0.016):datavg[1014] DATA_DIVERGENCE=31 +epprd_rg:clvaryonvg(0.016):datavg[1015] ARGS='' +epprd_rg:clvaryonvg(0.016):datavg[1016] typeset -li varyonvg_rc +epprd_rg:clvaryonvg(0.016):datavg[1017] typeset -li MAXLVS +epprd_rg:clvaryonvg(0.016):datavg[1018] ENODEV=19 +epprd_rg:clvaryonvg(0.016):datavg[1018] typeset -li ENODEV +epprd_rg:clvaryonvg(0.016):datavg[1020] set -u +epprd_rg:clvaryonvg(0.016):datavg[1022] /bin/dspmsg -s 2 cspoc.cat 31 'usage: clvaryonvg [-F] [-f] [-n] [-p] [-s] [-o] \n' +epprd_rg:clvaryonvg(0.018):datavg[1022] USAGE='usage: clvaryonvg [-F] [-f] [-n] [-p] [-s] [-o] ' +epprd_rg:clvaryonvg(0.019):datavg[1023] (( 2 < 1 )) +epprd_rg:clvaryonvg(0.019):datavg[1029] : Parse the options +epprd_rg:clvaryonvg(0.019):datavg[1031] S_FLAG='' +epprd_rg:clvaryonvg(0.019):datavg[1032] P_FLAG='' +epprd_rg:clvaryonvg(0.019):datavg[1033] getopts :Ffnops option +epprd_rg:clvaryonvg(0.019):datavg[1038] : -n Always applied, retained for compatibility +epprd_rg:clvaryonvg(0.019):datavg[1033] getopts :Ffnops option +epprd_rg:clvaryonvg(0.019):datavg[1048] : Pick up the volume group name, which follows the options +epprd_rg:clvaryonvg(0.019):datavg[1050] shift 1 +epprd_rg:clvaryonvg(0.019):datavg[1051] VG=datavg +epprd_rg:clvaryonvg(0.019):datavg[1054] : Set up filenames we will be using +epprd_rg:clvaryonvg(0.019):datavg[1056] VGDIR=/usr/es/sbin/cluster/etc/vg/ +epprd_rg:clvaryonvg(0.019):datavg[1057] TSFILE=/usr/es/sbin/cluster/etc/vg/datavg.tstamp +epprd_rg:clvaryonvg(0.019):datavg[1058] DSFILE=/usr/es/sbin/cluster/etc/vg/datavg.desc +epprd_rg:clvaryonvg(0.019):datavg[1059] RPFILE=/usr/es/sbin/cluster/etc/vg/datavg.replay +epprd_rg:clvaryonvg(0.019):datavg[1060] permset=/usr/es/sbin/cluster/etc/vg/datavg.perms +epprd_rg:clvaryonvg(0.019):datavg[1061] failfile=/usr/es/sbin/cluster/etc/vg/datavg.fail +epprd_rg:clvaryonvg(0.019):datavg[1065] : Get some LVM information we are going to need in processing this +epprd_rg:clvaryonvg(0.019):datavg[1066] : volume group: +epprd_rg:clvaryonvg(0.019):datavg[1067] : - volume group identifier - vgid +epprd_rg:clvaryonvg(0.019):datavg[1068] : - list of disks +epprd_rg:clvaryonvg(0.019):datavg[1069] : - quorum indicator +epprd_rg:clvaryonvg(0.019):datavg[1070] : - timestamp if present +epprd_rg:clvaryonvg(0.019):datavg[1072] /usr/sbin/getlvodm -v datavg +epprd_rg:clvaryonvg(0.022):datavg[1072] VGID=00c44af100004b00000001851e9dc053 
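The getlvodm calls here and just below gather the LVM facts clvaryonvg needs. In sketch form, assuming $VG holds the volume group name:

    VGID=$(/usr/sbin/getlvodm -v $VG)                      # volume group identifier
    pvlst=$(/usr/sbin/getlvodm -w $VGID | cut -d' ' -f2)   # physical volumes backing the VG
    quorum=$(/usr/sbin/getlvodm -Q $VG)                    # y if quorum checking is on
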
+epprd_rg:clvaryonvg(0.024):datavg[1073] cut '-d ' -f2 +epprd_rg:clvaryonvg(0.024):datavg[1073] /usr/sbin/getlvodm -w 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(0.027):datavg[1073] pvlst=$'hdisk2\nhdisk3\nhdisk4\nhdisk5\nhdisk6\nhdisk7\nhdisk8' +epprd_rg:clvaryonvg(0.027):datavg[1074] /usr/sbin/getlvodm -Q datavg +epprd_rg:clvaryonvg(0.030):datavg[1074] quorum=y +epprd_rg:clvaryonvg(0.031):datavg[1075] TS_FROM_DISK='' +epprd_rg:clvaryonvg(0.031):datavg[1076] TS_FROM_ODM='' +epprd_rg:clvaryonvg(0.031):datavg[1077] GOOD_PV='' +epprd_rg:clvaryonvg(0.031):datavg[1078] O_flag='' +epprd_rg:clvaryonvg(0.031):datavg[1079] A_flag='' +epprd_rg:clvaryonvg(0.031):datavg[1080] mode_flag='' +epprd_rg:clvaryonvg(0.031):datavg[1081] vg_on_mode='' +epprd_rg:clvaryonvg(0.031):datavg[1082] vg_set_passive=FALSE +epprd_rg:clvaryonvg(0.031):datavg[1084] odmget -q 'attribute = varyon_state' PdAt +epprd_rg:clvaryonvg(0.034):datavg[1084] [[ -n $'\nPdAt:\n\tuniquetype = "logical_volume/vgsubclass/vgtype"\n\tattribute = "varyon_state"\n\tdeflt = "0"\n\tvalues = "0,1,2,3"\n\twidth = ""\n\ttype = "R"\n\tgeneric = ""\n\trep = "l"\n\tnls_index = 0' ]] +epprd_rg:clvaryonvg(0.034):datavg[1087] : LVM may record that a volume group was varied on from an earlier +epprd_rg:clvaryonvg(0.034):datavg[1088] : IPL. Rely on HA state tracking, and override the LVM check +epprd_rg:clvaryonvg(0.034):datavg[1090] O_flag=-O +epprd_rg:clvaryonvg(0.034):datavg[1093] : Checking if SCSI PR is enabled and it is so, +epprd_rg:clvaryonvg(0.034):datavg[1094] : confirming if the SCSI PR reservations are intact. +epprd_rg:clvaryonvg(0.035):datavg[1096] lssrc -ls clstrmgrES +epprd_rg:clvaryonvg(0.035):datavg[1096] 2>& 1 +epprd_rg:clvaryonvg(0.035):datavg[1096] egrep -q -v 'ST_INIT|NOT_CONFIGURED' +epprd_rg:clvaryonvg(0.035):datavg[1096] grep 'Current state:' +epprd_rg:clvaryonvg(0.050):datavg[1098] clodmget -n -q policy=scsi -f value HACMPsplitmerge +epprd_rg:clvaryonvg(0.053):datavg[1098] SCSIPR_ENABLED='' +epprd_rg:clvaryonvg(0.053):datavg[1098] typeset SCSIPR_ENABLED +epprd_rg:clvaryonvg(0.053):datavg[1099] clodmget -q $'name like \'*VOLUME_GROUP\' and value = datavg' -f group -n HACMPresource +epprd_rg:clvaryonvg(0.056):datavg[1099] resgrp=epprd_rg +epprd_rg:clvaryonvg(0.056):datavg[1099] typeset resgrp +epprd_rg:clvaryonvg(0.056):datavg[1100] [[ '' == Yes ]] +epprd_rg:clvaryonvg(0.056):datavg[1134] : Operations such as varying on the volume group are likely to +epprd_rg:clvaryonvg(0.056):datavg[1135] : require read/write access. So, set any volume group fencing appropriately. +epprd_rg:clvaryonvg(0.057):datavg[1137] cl_set_vg_fence_height -c datavg rw +epprd_rg:clvaryonvg(0.060):datavg[1138] RC=0 +epprd_rg:clvaryonvg(0.060):datavg[1139] (( 19 == 0 )) +epprd_rg:clvaryonvg(0.060):datavg[1147] : Return code from volume group fencing for datavg is 0 +epprd_rg:clvaryonvg(0.060):datavg[1148] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.060):datavg[1160] : Check on the current state of the volume group +epprd_rg:clvaryonvg(0.062):datavg[1182] grep -x -q datavg +epprd_rg:clvaryonvg(0.062):datavg[1182] lsvg -L +epprd_rg:clvaryonvg(0.065):datavg[1184] : The volume group is known - check to see if its already varyd on. 
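The state checks made here and in the next few trace lines answer three questions with the same lsvg idioms: is the VG known locally, is it already varied on, and is it on in passive mode (an ECM VG brought up passively at node start)? A compact sketch:

    lsvg -L | grep -x -q $VG || print "VG not known on this node"
    lsvg -L -o | grep -x -q $VG && print "VG already varied on"
    if lsvg -L $VG 2> /dev/null | grep -q -i -w passive-only
    then
        vg_on_mode=passive      # ECM VG came up passive at node start
    fi
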
+epprd_rg:clvaryonvg(0.066):datavg[1186] grep -x -q datavg +epprd_rg:clvaryonvg(0.066):datavg[1186] lsvg -L -o +epprd_rg:clvaryonvg(0.070):datavg[1190] lsvg -L datavg +epprd_rg:clvaryonvg(0.070):datavg[1190] 2> /dev/null +epprd_rg:clvaryonvg(0.070):datavg[1190] grep -q -i -w passive-only +epprd_rg:clvaryonvg(0.113):datavg[1191] vg_on_mode=passive +epprd_rg:clvaryonvg(0.115):datavg[1194] grep -iw removed +epprd_rg:clvaryonvg(0.115):datavg[1194] lsvg -p datavg +epprd_rg:clvaryonvg(0.115):datavg[1194] 2> /dev/null +epprd_rg:clvaryonvg(0.135):datavg[1194] removed_disks='' +epprd_rg:clvaryonvg(0.135):datavg[1195] [[ -n '' ]] +epprd_rg:clvaryonvg(0.135):datavg[1213] [[ -n passive ]] +epprd_rg:clvaryonvg(0.135):datavg[1215] lqueryvg -g 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(0.136):datavg[1215] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.154):datavg[1321] : +epprd_rg:clvaryonvg(0.154):datavg[1322] : First, sniff at the disk to see if the local ODM information +epprd_rg:clvaryonvg(0.154):datavg[1323] : matches what is on the disk. +epprd_rg:clvaryonvg(0.154):datavg[1324] : +epprd_rg:clvaryonvg(0.154):datavg[1326] vgdatimestamps +epprd_rg:clvaryonvg(0.154):datavg[vgdatimestamps:201] PS4_FUNC=vgdatimestamps +epprd_rg:clvaryonvg(0.154):datavg[vgdatimestamps:201] typeset PS4_FUNC +epprd_rg:clvaryonvg(0.154):datavg[vgdatimestamps:202] [[ high == high ]] +epprd_rg:clvaryonvg(0.154):datavg[vgdatimestamps:202] set -x +epprd_rg:clvaryonvg(0.154):datavg[vgdatimestamps:203] set -u +epprd_rg:clvaryonvg(0.154):datavg[vgdatimestamps:206] : See what timestamp LVM has recorded from the last time it checked +epprd_rg:clvaryonvg(0.154):datavg[vgdatimestamps:207] : the disks +epprd_rg:clvaryonvg(0.154):datavg[vgdatimestamps:209] /usr/sbin/getlvodm -T 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(0.155):datavg[vgdatimestamps:209] 2> /dev/null +epprd_rg:clvaryonvg(0.157):datavg[vgdatimestamps:209] TS_FROM_ODM=651523922bbb5897 +epprd_rg:clvaryonvg(0.157):datavg[vgdatimestamps:212] : Check to see if HACMP is maintaining a timestamp for this volume group +epprd_rg:clvaryonvg(0.157):datavg[vgdatimestamps:213] : Needed for some older volume groups +epprd_rg:clvaryonvg(0.157):datavg[vgdatimestamps:215] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.tstamp ]] +epprd_rg:clvaryonvg(0.157):datavg[vgdatimestamps:234] : Get the time stamp from the actual disk +epprd_rg:clvaryonvg(0.158):datavg[vgdatimestamps:236] clvgdats /dev/datavg +epprd_rg:clvaryonvg(0.159):datavg[vgdatimestamps:236] 2> /dev/null +epprd_rg:clvaryonvg(0.169):datavg[vgdatimestamps:236] TS_FROM_DISK=651523922bbb5897 +epprd_rg:clvaryonvg(0.169):datavg[vgdatimestamps:237] clvgdats_rc=0 +epprd_rg:clvaryonvg(0.169):datavg[vgdatimestamps:238] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.169):datavg[vgdatimestamps:247] [[ -z 651523922bbb5897 ]] +epprd_rg:clvaryonvg(0.169):datavg[1328] [[ 651523922bbb5897 != 651523922bbb5897 ]] +epprd_rg:clvaryonvg(0.169):datavg[1344] : There is a chance that a VG that should be in passive mode is not. +epprd_rg:clvaryonvg(0.169):datavg[1345] : Run cl_pvo to put it in passive mode if possible. +epprd_rg:clvaryonvg(0.169):datavg[1350] [[ -z passive ]] +epprd_rg:clvaryonvg(0.169):datavg[1350] [[ passive == ordinary ]] +epprd_rg:clvaryonvg(0.169):datavg[1350] [[ passive == passive ]] +epprd_rg:clvaryonvg(0.169):datavg[1350] [[ -n '' ]] +epprd_rg:clvaryonvg(0.169):datavg[1381] : Let us assume that the old style synclvodm would sync all the PV/FS changes. 
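The vgdatimestamps routine traced above (and run again after the varyon) boils down to comparing two timestamps: the VGDA timestamp LVM last recorded in the ODM versus the one actually on disk. A mismatch means another node changed the VG and the local ODM is stale. A sketch, assuming $VGID and $VG as set earlier:

    TS_FROM_ODM=$(/usr/sbin/getlvodm -T $VGID 2> /dev/null)   # what LVM last recorded
    TS_FROM_DISK=$(clvgdats /dev/$VG 2> /dev/null)            # what the VGDA holds now
    if [[ $TS_FROM_ODM != $TS_FROM_DISK ]]
    then
        : # another node changed the VG; refresh local ODM data before use
    fi
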
+epprd_rg:clvaryonvg(0.169):datavg[1383] expimpvg_notrequired=1 +epprd_rg:clvaryonvg(0.169):datavg[1386] : Optimistically give varyonvg a try. +epprd_rg:clvaryonvg(0.169):datavg[1388] [[ passive == passive ]] +epprd_rg:clvaryonvg(0.169):datavg[1391] : If the volume group was varyd on in passive mode when this node came +epprd_rg:clvaryonvg(0.169):datavg[1392] : up, flip it over to active mode. Following logic will then fall +epprd_rg:clvaryonvg(0.169):datavg[1393] : through to updatefs. +epprd_rg:clvaryonvg(0.169):datavg[1395] [[ passive == passive ]] +epprd_rg:clvaryonvg(0.169):datavg[1395] A_flag=-A +epprd_rg:clvaryonvg(0.169):datavg[1396] varyonvg -n -c -A -O datavg +epprd_rg:clvaryonvg(0.170):datavg[1396] 2>& 1 +epprd_rg:clvaryonvg(0.396):datavg[1396] varyonvg_output='' +epprd_rg:clvaryonvg(0.396):datavg[1397] varyonvg_rc=0 +epprd_rg:clvaryonvg(0.396):datavg[1397] typeset -li varyonvg_rc +epprd_rg:clvaryonvg(0.396):datavg[1399] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.396):datavg[1481] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.396):datavg[1576] : At this point, datavg should be varied on +epprd_rg:clvaryonvg(0.396):datavg[1578] [[ FALSE == TRUE ]] +epprd_rg:clvaryonvg(0.396):datavg[1585] [[ -z 651523922bbb5897 ]] +epprd_rg:clvaryonvg(0.396):datavg[1592] vgdatimestamps +epprd_rg:clvaryonvg(0.396):datavg[vgdatimestamps:201] PS4_FUNC=vgdatimestamps +epprd_rg:clvaryonvg(0.396):datavg[vgdatimestamps:201] typeset PS4_FUNC +epprd_rg:clvaryonvg(0.396):datavg[vgdatimestamps:202] [[ high == high ]] +epprd_rg:clvaryonvg(0.396):datavg[vgdatimestamps:202] set -x +epprd_rg:clvaryonvg(0.396):datavg[vgdatimestamps:203] set -u +epprd_rg:clvaryonvg(0.396):datavg[vgdatimestamps:206] : See what timestamp LVM has recorded from the last time it checked +epprd_rg:clvaryonvg(0.396):datavg[vgdatimestamps:207] : the disks +epprd_rg:clvaryonvg(0.396):datavg[vgdatimestamps:209] /usr/sbin/getlvodm -T 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(0.397):datavg[vgdatimestamps:209] 2> /dev/null +epprd_rg:clvaryonvg(0.400):datavg[vgdatimestamps:209] TS_FROM_ODM=6517168c00d56747 +epprd_rg:clvaryonvg(0.400):datavg[vgdatimestamps:212] : Check to see if HACMP is maintaining a timestamp for this volume group +epprd_rg:clvaryonvg(0.400):datavg[vgdatimestamps:213] : Needed for some older volume groups +epprd_rg:clvaryonvg(0.400):datavg[vgdatimestamps:215] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.tstamp ]] +epprd_rg:clvaryonvg(0.400):datavg[vgdatimestamps:234] : Get the time stamp from the actual disk +epprd_rg:clvaryonvg(0.400):datavg[vgdatimestamps:236] clvgdats /dev/datavg +epprd_rg:clvaryonvg(0.401):datavg[vgdatimestamps:236] 2> /dev/null +epprd_rg:clvaryonvg(0.410):datavg[vgdatimestamps:236] TS_FROM_DISK=6517168c00d56747 +epprd_rg:clvaryonvg(0.410):datavg[vgdatimestamps:237] clvgdats_rc=0 +epprd_rg:clvaryonvg(0.410):datavg[vgdatimestamps:238] (( 0 != 0 )) +epprd_rg:clvaryonvg(0.410):datavg[vgdatimestamps:247] [[ -z 6517168c00d56747 ]] +epprd_rg:clvaryonvg(0.410):datavg[1600] [[ 6517168c00d56747 != 6517168c00d56747 ]] +epprd_rg:clvaryonvg(0.410):datavg[1622] [[ FALSE == TRUE ]] +epprd_rg:clvaryonvg(0.410):datavg[1633] : Even if everything looks OK, update the local file system +epprd_rg:clvaryonvg(0.410):datavg[1634] : definitions, since changes there do not show up in the +epprd_rg:clvaryonvg(0.410):datavg[1635] : VGDA timestamps +epprd_rg:clvaryonvg(0.410):datavg[1637] updatefs datavg +epprd_rg:clvaryonvg(0.410):datavg[updatefs:506] PS4_FUNC=updatefs +epprd_rg:clvaryonvg(0.410):datavg[updatefs:506] typeset PS4_FUNC 
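Because datavg was already on in passive mode, the varyonvg traced above flips it to active rather than performing a cold varyon. The flag annotations below follow the script's own comments and variable names (A_flag, O_flag); treat them as annotation, not authoritative varyonvg documentation:

    # -n  skip automatic sync of stale partitions
    # -c  enhanced concurrent mode
    # -A  active mode (the VG was in passive mode)
    # -O  override LVM's recorded varyon state from an earlier IPL
    varyonvg -n -c -A -O $VG
    varyonvg_rc=$?
    (( varyonvg_rc != 0 )) && print -u2 "varyonvg failed: rc=$varyonvg_rc"
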
+epprd_rg:clvaryonvg(0.410):datavg[updatefs:507] [[ high == high ]] +epprd_rg:clvaryonvg(0.410):datavg[updatefs:507] set -x +epprd_rg:clvaryonvg(0.410):datavg[updatefs:508] do_imfs='' +epprd_rg:clvaryonvg(0.410):datavg[updatefs:508] typeset do_imfs +epprd_rg:clvaryonvg(0.410):datavg[updatefs:509] has_typed_lvs='' +epprd_rg:clvaryonvg(0.410):datavg[updatefs:509] typeset has_typed_lvs +epprd_rg:clvaryonvg(0.410):datavg[updatefs:512] : Delete existing filesystem information for this volume group. This is +epprd_rg:clvaryonvg(0.410):datavg[updatefs:513] : needed because imfs will not update an existing /etc/filesystems entry. +epprd_rg:clvaryonvg(0.412):datavg[updatefs:515] cut -f1 '-d ' +epprd_rg:clvaryonvg(0.412):datavg[updatefs:515] /usr/sbin/getlvodm -L datavg +epprd_rg:clvaryonvg(0.416):datavg[updatefs:515] lv_list=$'saplv\nsapmntlv\noraclelv\nepplv\noraarchlv\nsapdata1lv\nsapdata2lv\nsapdata3lv\nsapdata4lv\nboardlv\noriglogAlv\noriglogBlv\nmirrlogAlv\nmirrlogBlv\nepprdaloglv' +epprd_rg:clvaryonvg(0.416):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.416):datavg[updatefs:521] clodmget -q 'name = saplv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.420):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.420):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.420):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.420):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(0.420):datavg[updatefs:530] /usr/sbin/getlvcb -f saplv +epprd_rg:clvaryonvg(0.421):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.439):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.439):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.439):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.441):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.441):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.445):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.445):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.445):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.445):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.446):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.465):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.465):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.465):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.465):datavg[updatefs:538] : 3. 
Its logs LVCB is readable +epprd_rg:clvaryonvg(0.466):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.466):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.470):datavg[updatefs:545] /usr/sbin/imfs -lx saplv +epprd_rg:clvaryonvg(0.474):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.474):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.474):datavg[updatefs:521] clodmget -q 'name = sapmntlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.478):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.478):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.478):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.478):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(0.478):datavg[updatefs:530] /usr/sbin/getlvcb -f sapmntlv +epprd_rg:clvaryonvg(0.479):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.496):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.496):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.496):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.498):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.498):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.502):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.502):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.502):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.502):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.503):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.523):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.523):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.523):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.523):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.524):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.524):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.527):datavg[updatefs:545] /usr/sbin/imfs -lx sapmntlv +epprd_rg:clvaryonvg(0.531):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.531):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.531):datavg[updatefs:521] clodmget -q 'name = oraclelv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.535):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.535):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.535):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.535):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.535):datavg[updatefs:530] /usr/sbin/getlvcb -f oraclelv +epprd_rg:clvaryonvg(0.536):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.553):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.553):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.553):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.555):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.555):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.559):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.559):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.559):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.559):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.560):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.579):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.579):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.579):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.579):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.580):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.580):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.583):datavg[updatefs:545] /usr/sbin/imfs -lx oraclelv +epprd_rg:clvaryonvg(0.588):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.588):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.588):datavg[updatefs:521] clodmget -q 'name = epplv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.591):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.591):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.591):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.591):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.591):datavg[updatefs:530] /usr/sbin/getlvcb -f epplv +epprd_rg:clvaryonvg(0.592):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.610):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.610):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.610):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.612):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.612):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.615):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.615):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.615):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.616):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.617):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.635):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.635):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.635):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.635):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.636):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.637):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.640):datavg[updatefs:545] /usr/sbin/imfs -lx epplv +epprd_rg:clvaryonvg(0.644):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.644):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.644):datavg[updatefs:521] clodmget -q 'name = oraarchlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.647):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.647):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.647):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.647):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.647):datavg[updatefs:530] /usr/sbin/getlvcb -f oraarchlv +epprd_rg:clvaryonvg(0.648):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.666):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.666):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.666):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.668):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.668):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.671):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.671):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.671):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.671):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.673):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.691):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.691):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.691):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.691):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.692):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.692):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.695):datavg[updatefs:545] /usr/sbin/imfs -lx oraarchlv +epprd_rg:clvaryonvg(0.700):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.700):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.700):datavg[updatefs:521] clodmget -q 'name = sapdata1lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.703):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.703):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.703):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.703):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.703):datavg[updatefs:530] /usr/sbin/getlvcb -f sapdata1lv +epprd_rg:clvaryonvg(0.704):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.722):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.722):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.722):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.724):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.724):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.728):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.728):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.728):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.728):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.729):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.748):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.748):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.748):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.748):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.749):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.749):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.752):datavg[updatefs:545] /usr/sbin/imfs -lx sapdata1lv +epprd_rg:clvaryonvg(0.756):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.756):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.756):datavg[updatefs:521] clodmget -q 'name = sapdata2lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.760):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.760):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.760):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.760):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.760):datavg[updatefs:530] /usr/sbin/getlvcb -f sapdata2lv +epprd_rg:clvaryonvg(0.761):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.778):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.778):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.778):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.780):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.780):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.784):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.784):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.784):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.784):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.785):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.804):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.804):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.804):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.804):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.806):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.805):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.809):datavg[updatefs:545] /usr/sbin/imfs -lx sapdata2lv +epprd_rg:clvaryonvg(0.813):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.813):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.813):datavg[updatefs:521] clodmget -q 'name = sapdata3lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.816):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.816):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.816):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.817):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.817):datavg[updatefs:530] /usr/sbin/getlvcb -f sapdata3lv +epprd_rg:clvaryonvg(0.817):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.835):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.835):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.835):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.837):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.837):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.841):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.841):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.841):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.841):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.842):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.860):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.860):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.860):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.860):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.861):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.861):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.864):datavg[updatefs:545] /usr/sbin/imfs -lx sapdata3lv +epprd_rg:clvaryonvg(0.869):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.869):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.869):datavg[updatefs:521] clodmget -q 'name = sapdata4lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.872):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.873):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.873):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.873):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.873):datavg[updatefs:530] /usr/sbin/getlvcb -f sapdata4lv +epprd_rg:clvaryonvg(0.874):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.890):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.890):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.890):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.892):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.892):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.896):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.896):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.896):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.896):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.897):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.915):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.915):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.915):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.915):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.917):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.916):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.920):datavg[updatefs:545] /usr/sbin/imfs -lx sapdata4lv +epprd_rg:clvaryonvg(0.924):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.924):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.924):datavg[updatefs:521] clodmget -q 'name = boardlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.927):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.927):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.927):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.927):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.927):datavg[updatefs:530] /usr/sbin/getlvcb -f boardlv +epprd_rg:clvaryonvg(0.928):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(0.946):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.946):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(0.947):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(0.948):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(0.948):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(0.952):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(0.952):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(0.952):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(0.952):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(0.953):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(0.973):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(0.973):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(0.973):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(0.973):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(0.974):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(0.974):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(0.977):datavg[updatefs:545] /usr/sbin/imfs -lx boardlv +epprd_rg:clvaryonvg(0.982):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(0.982):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(0.982):datavg[updatefs:521] clodmget -q 'name = origlogAlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(0.985):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(0.985):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(0.985):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(0.985):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(0.985):datavg[updatefs:530] /usr/sbin/getlvcb -f origlogAlv +epprd_rg:clvaryonvg(0.986):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.004):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.004):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.004):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.006):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.006):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.009):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.009):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.009):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.009):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.011):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.029):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.029):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.029):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.029):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.030):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.030):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.033):datavg[updatefs:545] /usr/sbin/imfs -lx origlogAlv +epprd_rg:clvaryonvg(1.037):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.038):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.038):datavg[updatefs:521] clodmget -q 'name = origlogBlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.041):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.041):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.041):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.041):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(1.041):datavg[updatefs:530] /usr/sbin/getlvcb -f origlogBlv +epprd_rg:clvaryonvg(1.042):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.060):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.060):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.060):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.062):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.062):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.065):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.065):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.065):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.065):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.067):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.085):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.085):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.085):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.085):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.086):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.086):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.090):datavg[updatefs:545] /usr/sbin/imfs -lx origlogBlv +epprd_rg:clvaryonvg(1.094):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.094):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.094):datavg[updatefs:521] clodmget -q 'name = mirrlogAlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.098):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.098):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.098):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.098):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(1.098):datavg[updatefs:530] /usr/sbin/getlvcb -f mirrlogAlv +epprd_rg:clvaryonvg(1.099):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.116):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.117):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.117):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.118):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.118):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.122):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.122):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.122):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.122):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.124):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.142):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.142):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.142):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.142):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.143):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.143):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.147):datavg[updatefs:545] /usr/sbin/imfs -lx mirrlogAlv +epprd_rg:clvaryonvg(1.151):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.151):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.151):datavg[updatefs:521] clodmget -q 'name = mirrlogBlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.154):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.154):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.154):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.154):datavg[updatefs:528] : information to reconstruct it. 
+epprd_rg:clvaryonvg(1.154):datavg[updatefs:530] /usr/sbin/getlvcb -f mirrlogBlv +epprd_rg:clvaryonvg(1.155):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.173):datavg[updatefs:530] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.173):datavg[updatefs:531] [[ -n vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.173):datavg[updatefs:531] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.175):datavg[updatefs:532] sed -n 's/.*log=\([^:]*\).*/\1/p' +epprd_rg:clvaryonvg(1.175):datavg[updatefs:532] echo vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes +epprd_rg:clvaryonvg(1.179):datavg[updatefs:532] log_lv=/dev/epprdaloglv +epprd_rg:clvaryonvg(1.179):datavg[updatefs:533] [[ -n /dev/epprdaloglv ]] +epprd_rg:clvaryonvg(1.179):datavg[updatefs:533] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:clvaryonvg(1.179):datavg[updatefs:533] /usr/sbin/getlvcb -t epprdaloglv +epprd_rg:clvaryonvg(1.180):datavg[updatefs:533] 1> /dev/null 2>& 1 +epprd_rg:clvaryonvg(1.198):datavg[updatefs:535] : Only delete the file system information if +epprd_rg:clvaryonvg(1.198):datavg[updatefs:536] : 1. This logical volume is a file system +epprd_rg:clvaryonvg(1.198):datavg[updatefs:537] : 2. Its LVCB is readable +epprd_rg:clvaryonvg(1.198):datavg[updatefs:538] : 3. Its logs LVCB is readable +epprd_rg:clvaryonvg(1.199):datavg[updatefs:540] print -- vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.199):datavg[updatefs:540] grep -q :type= +epprd_rg:clvaryonvg(1.203):datavg[updatefs:545] /usr/sbin/imfs -lx mirrlogBlv +epprd_rg:clvaryonvg(1.207):datavg[updatefs:546] do_imfs=true +epprd_rg:clvaryonvg(1.207):datavg[updatefs:519] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.207):datavg[updatefs:521] clodmget -q 'name = epprdaloglv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.210):datavg[updatefs:521] [[ -n '' ]] +epprd_rg:clvaryonvg(1.210):datavg[updatefs:526] : Some checks here to ensure that, before we delete the information +epprd_rg:clvaryonvg(1.210):datavg[updatefs:527] : on a file system from /etc/filesystems, that we have the +epprd_rg:clvaryonvg(1.210):datavg[updatefs:528] : information to reconstruct it. +epprd_rg:clvaryonvg(1.210):datavg[updatefs:530] /usr/sbin/getlvcb -f epprdaloglv +epprd_rg:clvaryonvg(1.211):datavg[updatefs:530] LC_ALL=C +epprd_rg:clvaryonvg(1.228):datavg[updatefs:530] fs_info=' ' +epprd_rg:clvaryonvg(1.228):datavg[updatefs:531] [[ -n ' ' ]] +epprd_rg:clvaryonvg(1.228):datavg[updatefs:531] [[ ' ' != *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.228):datavg[updatefs:552] [[ -n true ]] +epprd_rg:clvaryonvg(1.228):datavg[updatefs:556] : Pick up any file system changes that may have happened when +epprd_rg:clvaryonvg(1.228):datavg[updatefs:557] : the volume group was owned by another node. That is, if a +epprd_rg:clvaryonvg(1.228):datavg[updatefs:558] : local change was made - not through C-SPOC, we would have no +epprd_rg:clvaryonvg(1.228):datavg[updatefs:559] : indication it happened. 
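[Editor's note] With the scan finished and do_imfs set (the '[[ -n true ]]' test at updatefs:552 above), the trace next rebuilds /etc/filesystems for the whole group in one call. A sketch, assuming $vg holds the group name ('datavg' in this trace):

    # updatefs:552-563: one imfs pass re-imports every stanza from the LVCBs
    # now readable on disk, picking up changes made while another node owned
    # the volume group (e.g. a local, non-C-SPOC change this node never saw)
    if [[ -n $do_imfs ]] ; then
        /usr/sbin/imfs $vg          # 'imfs datavg' in the trace below
    fi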
+epprd_rg:clvaryonvg(1.228):datavg[updatefs:561] [[ -z '' ]] +epprd_rg:clvaryonvg(1.228):datavg[updatefs:563] /usr/sbin/imfs datavg +epprd_rg:clvaryonvg(1.938):datavg[updatefs:589] : For a valid file system configuration, the mount point in +epprd_rg:clvaryonvg(1.938):datavg[updatefs:590] : /etc/filesystems for the logical volume should match the +epprd_rg:clvaryonvg(1.938):datavg[updatefs:591] : label of the logical volume. The above imfs should have +epprd_rg:clvaryonvg(1.938):datavg[updatefs:592] : matched those two. Now, check that they match the label +epprd_rg:clvaryonvg(1.938):datavg[updatefs:593] : for the logical volume as saved in ODM. +epprd_rg:clvaryonvg(1.938):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.938):datavg[updatefs:600] clodmget -q 'name = saplv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(1.941):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(1.941):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(1.941):datavg[updatefs:607] /usr/sbin/getlvcb -f saplv +epprd_rg:clvaryonvg(1.959):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(1.959):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(1.959):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(1.959):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(1.959):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(1.959):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(1.959):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(1.959):datavg[updatefs:623] : Label and file system type from LVCB on disk for saplv +epprd_rg:clvaryonvg(1.960):datavg[updatefs:625] getlvcb -T -A saplv +epprd_rg:clvaryonvg(1.960):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(1.963):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(1.966):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(1.968):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(1.981):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(1.981):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(1.981):datavg[updatefs:632] : Mount point in /etc/filesystems for saplv +epprd_rg:clvaryonvg(1.982):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/saplv$' /etc/filesystems +epprd_rg:clvaryonvg(1.985):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(1.986):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(1.990):datavg[updatefs:634] fs_mount_point=/usr/sap +epprd_rg:clvaryonvg(1.990):datavg[updatefs:637] : CuAt label attribute for saplv +epprd_rg:clvaryonvg(1.990):datavg[updatefs:639] clodmget -q 'name = saplv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(1.993):datavg[updatefs:639] CuAt_label=/usr/sap +epprd_rg:clvaryonvg(1.994):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(1.996):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(1.999):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(1.999):datavg[updatefs:657] [[ -z /usr/sap ]] +epprd_rg:clvaryonvg(1.999):datavg[updatefs:657] [[ /usr/sap == None ]] +epprd_rg:clvaryonvg(1.999):datavg[updatefs:665] [[ /usr/sap == /usr/sap ]] +epprd_rg:clvaryonvg(1.999):datavg[updatefs:665] [[ /usr/sap != /usr/sap ]] 
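[Editor's note] The saplv walkthrough above is the post-imfs validation applied to each LV in turn below: the label in the on-disk LVCB, the mount point of the /etc/filesystems stanza, and the label attribute in CuAt should all name the same directory. A sketch of that triple check for one LV, with names taken from the trace; the mismatch action is a placeholder, since no repair path is exercised in this log. Note, incidentally, that the trace shows 'print -- CuAt_label' unexpanded, so the wc -l guard at line 640 always counts exactly one line.

    # updatefs:598-685, one LV ('saplv' as in the trace)
    lv=saplv

    # Label and vfs type from the LVCB on disk; in ksh the last pipeline
    # stage runs in the current shell, so read sets the variables here
    LC_ALL=C getlvcb -T -A $lv | egrep -w 'label =|type =' | paste -s - - |
        read skip skip lvcb_label skip skip lvcb_type rest

    # Mount point of the stanza whose dev = /dev/<lv>; AIX egrep -p returns
    # the whole paragraph, and the stanza header is field 1 before the colon
    fs_mount_point=$(egrep -p "^([[:space:]])*dev([[:space:]])*= /dev/$lv\$" \
        /etc/filesystems | head -1 | cut -f1 -d:)

    # Label attribute held in the ODM
    CuAt_label=$(clodmget -q "name = $lv and attribute = label" -f value -n CuAt)

    # All three must agree for a valid configuration
    if [[ $lvcb_label != $CuAt_label || $fs_mount_point != $CuAt_label ]] ; then
        print "label mismatch for $lv"   # placeholder; repair not shown here
    fi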
+epprd_rg:clvaryonvg(1.999):datavg[updatefs:685] [[ /usr/sap != /usr/sap ]] +epprd_rg:clvaryonvg(1.999):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(1.999):datavg[updatefs:600] clodmget -q 'name = sapmntlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.002):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.002):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.002):datavg[updatefs:607] /usr/sbin/getlvcb -f sapmntlv +epprd_rg:clvaryonvg(2.019):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.019):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.019):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.019):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.020):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.020):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.020):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.020):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapmntlv +epprd_rg:clvaryonvg(2.021):datavg[updatefs:625] getlvcb -T -A sapmntlv +epprd_rg:clvaryonvg(2.021):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.024):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.027):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.029):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.041):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.041):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.041):datavg[updatefs:632] : Mount point in /etc/filesystems for sapmntlv +epprd_rg:clvaryonvg(2.043):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapmntlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.045):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.047):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.050):datavg[updatefs:634] fs_mount_point=/sapmnt +epprd_rg:clvaryonvg(2.050):datavg[updatefs:637] : CuAt label attribute for sapmntlv +epprd_rg:clvaryonvg(2.050):datavg[updatefs:639] clodmget -q 'name = sapmntlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.054):datavg[updatefs:639] CuAt_label=/sapmnt +epprd_rg:clvaryonvg(2.055):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.056):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.059):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.060):datavg[updatefs:657] [[ -z /sapmnt ]] +epprd_rg:clvaryonvg(2.060):datavg[updatefs:657] [[ /sapmnt == None ]] +epprd_rg:clvaryonvg(2.060):datavg[updatefs:665] [[ /sapmnt == /sapmnt ]] +epprd_rg:clvaryonvg(2.060):datavg[updatefs:665] [[ /sapmnt != /sapmnt ]] +epprd_rg:clvaryonvg(2.060):datavg[updatefs:685] [[ /sapmnt != /sapmnt ]] +epprd_rg:clvaryonvg(2.060):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.060):datavg[updatefs:600] clodmget -q 'name = oraclelv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.063):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.063):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.063):datavg[updatefs:607] /usr/sbin/getlvcb -f oraclelv 
+epprd_rg:clvaryonvg(2.081):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.081):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.081):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.081):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.081):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.081):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.081):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.081):datavg[updatefs:623] : Label and file system type from LVCB on disk for oraclelv +epprd_rg:clvaryonvg(2.082):datavg[updatefs:625] getlvcb -T -A oraclelv +epprd_rg:clvaryonvg(2.083):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.086):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.089):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.091):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.103):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.103):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.103):datavg[updatefs:632] : Mount point in /etc/filesystems for oraclelv +epprd_rg:clvaryonvg(2.105):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/oraclelv$' /etc/filesystems +epprd_rg:clvaryonvg(2.107):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.109):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.112):datavg[updatefs:634] fs_mount_point=/oracle +epprd_rg:clvaryonvg(2.112):datavg[updatefs:637] : CuAt label attribute for oraclelv +epprd_rg:clvaryonvg(2.112):datavg[updatefs:639] clodmget -q 'name = oraclelv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.115):datavg[updatefs:639] CuAt_label=/oracle +epprd_rg:clvaryonvg(2.117):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.118):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.121):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.121):datavg[updatefs:657] [[ -z /oracle ]] +epprd_rg:clvaryonvg(2.121):datavg[updatefs:657] [[ /oracle == None ]] +epprd_rg:clvaryonvg(2.121):datavg[updatefs:665] [[ /oracle == /oracle ]] +epprd_rg:clvaryonvg(2.122):datavg[updatefs:665] [[ /oracle != /oracle ]] +epprd_rg:clvaryonvg(2.122):datavg[updatefs:685] [[ /oracle != /oracle ]] +epprd_rg:clvaryonvg(2.122):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.122):datavg[updatefs:600] clodmget -q 'name = epplv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.125):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.125):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.125):datavg[updatefs:607] /usr/sbin/getlvcb -f epplv +epprd_rg:clvaryonvg(2.142):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.142):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.142):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.142):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.142):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.142):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.142):datavg[updatefs:618] [[ 
vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.142):datavg[updatefs:623] : Label and file system type from LVCB on disk for epplv +epprd_rg:clvaryonvg(2.143):datavg[updatefs:625] getlvcb -T -A epplv +epprd_rg:clvaryonvg(2.143):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.147):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.150):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.152):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.164):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.164):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.164):datavg[updatefs:632] : Mount point in /etc/filesystems for epplv +epprd_rg:clvaryonvg(2.166):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/epplv$' /etc/filesystems +epprd_rg:clvaryonvg(2.169):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.168):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.173):datavg[updatefs:634] fs_mount_point=/oracle/EPP +epprd_rg:clvaryonvg(2.173):datavg[updatefs:637] : CuAt label attribute for epplv +epprd_rg:clvaryonvg(2.174):datavg[updatefs:639] clodmget -q 'name = epplv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.177):datavg[updatefs:639] CuAt_label=/oracle/EPP +epprd_rg:clvaryonvg(2.178):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.179):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.183):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.183):datavg[updatefs:657] [[ -z /oracle/EPP ]] +epprd_rg:clvaryonvg(2.183):datavg[updatefs:657] [[ /oracle/EPP == None ]] +epprd_rg:clvaryonvg(2.183):datavg[updatefs:665] [[ /oracle/EPP == /oracle/EPP ]] +epprd_rg:clvaryonvg(2.183):datavg[updatefs:665] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:clvaryonvg(2.183):datavg[updatefs:685] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:clvaryonvg(2.183):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.183):datavg[updatefs:600] clodmget -q 'name = oraarchlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.186):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.187):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.187):datavg[updatefs:607] /usr/sbin/getlvcb -f oraarchlv +epprd_rg:clvaryonvg(2.204):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.204):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.204):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.204):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.204):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.204):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.204):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.204):datavg[updatefs:623] : Label and file system type from LVCB on disk for oraarchlv +epprd_rg:clvaryonvg(2.205):datavg[updatefs:625] getlvcb -T -A oraarchlv +epprd_rg:clvaryonvg(2.205):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.208):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.211):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.213):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type 
rest +epprd_rg:clvaryonvg(2.226):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.226):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.226):datavg[updatefs:632] : Mount point in /etc/filesystems for oraarchlv +epprd_rg:clvaryonvg(2.227):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/oraarchlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.229):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.231):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.234):datavg[updatefs:634] fs_mount_point=/oracle/EPP/oraarch +epprd_rg:clvaryonvg(2.234):datavg[updatefs:637] : CuAt label attribute for oraarchlv +epprd_rg:clvaryonvg(2.235):datavg[updatefs:639] clodmget -q 'name = oraarchlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.238):datavg[updatefs:639] CuAt_label=/oracle/EPP/oraarch +epprd_rg:clvaryonvg(2.239):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.241):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.244):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.244):datavg[updatefs:657] [[ -z /oracle/EPP/oraarch ]] +epprd_rg:clvaryonvg(2.244):datavg[updatefs:657] [[ /oracle/EPP/oraarch == None ]] +epprd_rg:clvaryonvg(2.244):datavg[updatefs:665] [[ /oracle/EPP/oraarch == /oracle/EPP/oraarch ]] +epprd_rg:clvaryonvg(2.244):datavg[updatefs:665] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:clvaryonvg(2.244):datavg[updatefs:685] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:clvaryonvg(2.244):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.244):datavg[updatefs:600] clodmget -q 'name = sapdata1lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.247):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.247):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.247):datavg[updatefs:607] /usr/sbin/getlvcb -f sapdata1lv +epprd_rg:clvaryonvg(2.265):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.265):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.265):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.265):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.265):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.265):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.265):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.265):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapdata1lv +epprd_rg:clvaryonvg(2.266):datavg[updatefs:625] getlvcb -T -A sapdata1lv +epprd_rg:clvaryonvg(2.266):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.269):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.272):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.274):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.287):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.287):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.287):datavg[updatefs:632] : Mount point in /etc/filesystems for sapdata1lv +epprd_rg:clvaryonvg(2.290):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.291):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapdata1lv$' /etc/filesystems 
+epprd_rg:clvaryonvg(2.293):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.296):datavg[updatefs:634] fs_mount_point=/oracle/EPP/sapdata1 +epprd_rg:clvaryonvg(2.296):datavg[updatefs:637] : CuAt label attribute for sapdata1lv +epprd_rg:clvaryonvg(2.296):datavg[updatefs:639] clodmget -q 'name = sapdata1lv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.300):datavg[updatefs:639] CuAt_label=/oracle/EPP/sapdata1 +epprd_rg:clvaryonvg(2.301):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.303):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.306):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.306):datavg[updatefs:657] [[ -z /oracle/EPP/sapdata1 ]] +epprd_rg:clvaryonvg(2.306):datavg[updatefs:657] [[ /oracle/EPP/sapdata1 == None ]] +epprd_rg:clvaryonvg(2.306):datavg[updatefs:665] [[ /oracle/EPP/sapdata1 == /oracle/EPP/sapdata1 ]] +epprd_rg:clvaryonvg(2.306):datavg[updatefs:665] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:clvaryonvg(2.306):datavg[updatefs:685] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:clvaryonvg(2.306):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.306):datavg[updatefs:600] clodmget -q 'name = sapdata2lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.309):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.309):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.309):datavg[updatefs:607] /usr/sbin/getlvcb -f sapdata2lv +epprd_rg:clvaryonvg(2.333):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.333):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.333):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.333):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.333):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.333):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.333):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.333):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapdata2lv +epprd_rg:clvaryonvg(2.334):datavg[updatefs:625] getlvcb -T -A sapdata2lv +epprd_rg:clvaryonvg(2.334):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.338):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.341):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.343):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.355):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.355):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.355):datavg[updatefs:632] : Mount point in /etc/filesystems for sapdata2lv +epprd_rg:clvaryonvg(2.357):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapdata2lv$' /etc/filesystems +epprd_rg:clvaryonvg(2.359):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.361):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.364):datavg[updatefs:634] fs_mount_point=/oracle/EPP/sapdata2 +epprd_rg:clvaryonvg(2.364):datavg[updatefs:637] : CuAt label attribute for sapdata2lv +epprd_rg:clvaryonvg(2.364):datavg[updatefs:639] clodmget -q 'name = sapdata2lv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.368):datavg[updatefs:639] CuAt_label=/oracle/EPP/sapdata2 
+epprd_rg:clvaryonvg(2.369):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.370):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.374):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.374):datavg[updatefs:657] [[ -z /oracle/EPP/sapdata2 ]] +epprd_rg:clvaryonvg(2.374):datavg[updatefs:657] [[ /oracle/EPP/sapdata2 == None ]] +epprd_rg:clvaryonvg(2.374):datavg[updatefs:665] [[ /oracle/EPP/sapdata2 == /oracle/EPP/sapdata2 ]] +epprd_rg:clvaryonvg(2.374):datavg[updatefs:665] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:clvaryonvg(2.374):datavg[updatefs:685] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:clvaryonvg(2.374):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.374):datavg[updatefs:600] clodmget -q 'name = sapdata3lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.377):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.377):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.377):datavg[updatefs:607] /usr/sbin/getlvcb -f sapdata3lv +epprd_rg:clvaryonvg(2.394):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.395):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.395):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.395):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.395):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.395):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.395):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.395):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapdata3lv +epprd_rg:clvaryonvg(2.396):datavg[updatefs:625] getlvcb -T -A sapdata3lv +epprd_rg:clvaryonvg(2.396):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.399):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.402):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.404):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.417):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.417):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.417):datavg[updatefs:632] : Mount point in /etc/filesystems for sapdata3lv +epprd_rg:clvaryonvg(2.418):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapdata3lv$' /etc/filesystems +epprd_rg:clvaryonvg(2.420):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.422):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.426):datavg[updatefs:634] fs_mount_point=/oracle/EPP/sapdata3 +epprd_rg:clvaryonvg(2.426):datavg[updatefs:637] : CuAt label attribute for sapdata3lv +epprd_rg:clvaryonvg(2.426):datavg[updatefs:639] clodmget -q 'name = sapdata3lv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.429):datavg[updatefs:639] CuAt_label=/oracle/EPP/sapdata3 +epprd_rg:clvaryonvg(2.431):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.432):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.435):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.435):datavg[updatefs:657] [[ -z /oracle/EPP/sapdata3 ]] +epprd_rg:clvaryonvg(2.435):datavg[updatefs:657] [[ /oracle/EPP/sapdata3 == None ]] +epprd_rg:clvaryonvg(2.435):datavg[updatefs:665] [[ /oracle/EPP/sapdata3 == 
/oracle/EPP/sapdata3 ]] +epprd_rg:clvaryonvg(2.435):datavg[updatefs:665] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:clvaryonvg(2.435):datavg[updatefs:685] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:clvaryonvg(2.435):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.435):datavg[updatefs:600] clodmget -q 'name = sapdata4lv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.438):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.439):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.439):datavg[updatefs:607] /usr/sbin/getlvcb -f sapdata4lv +epprd_rg:clvaryonvg(2.456):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.456):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.456):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.456):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.456):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.456):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.456):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.456):datavg[updatefs:623] : Label and file system type from LVCB on disk for sapdata4lv +epprd_rg:clvaryonvg(2.457):datavg[updatefs:625] getlvcb -T -A sapdata4lv +epprd_rg:clvaryonvg(2.457):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.460):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.463):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.465):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.478):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.478):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.478):datavg[updatefs:632] : Mount point in /etc/filesystems for sapdata4lv +epprd_rg:clvaryonvg(2.480):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/sapdata4lv$' /etc/filesystems +epprd_rg:clvaryonvg(2.482):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.484):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.487):datavg[updatefs:634] fs_mount_point=/oracle/EPP/sapdata4 +epprd_rg:clvaryonvg(2.487):datavg[updatefs:637] : CuAt label attribute for sapdata4lv +epprd_rg:clvaryonvg(2.487):datavg[updatefs:639] clodmget -q 'name = sapdata4lv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.490):datavg[updatefs:639] CuAt_label=/oracle/EPP/sapdata4 +epprd_rg:clvaryonvg(2.492):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.493):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.496):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.496):datavg[updatefs:657] [[ -z /oracle/EPP/sapdata4 ]] +epprd_rg:clvaryonvg(2.496):datavg[updatefs:657] [[ /oracle/EPP/sapdata4 == None ]] +epprd_rg:clvaryonvg(2.496):datavg[updatefs:665] [[ /oracle/EPP/sapdata4 == /oracle/EPP/sapdata4 ]] +epprd_rg:clvaryonvg(2.496):datavg[updatefs:665] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:clvaryonvg(2.496):datavg[updatefs:685] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:clvaryonvg(2.496):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.496):datavg[updatefs:600] clodmget -q 'name = boardlv and attribute = type and value = raw' 
-f value -n CuAt +epprd_rg:clvaryonvg(2.500):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.500):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.500):datavg[updatefs:607] /usr/sbin/getlvcb -f boardlv +epprd_rg:clvaryonvg(2.517):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.517):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.517):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.517):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.517):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.517):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.517):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.517):datavg[updatefs:623] : Label and file system type from LVCB on disk for boardlv +epprd_rg:clvaryonvg(2.518):datavg[updatefs:625] getlvcb -T -A boardlv +epprd_rg:clvaryonvg(2.518):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.521):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.524):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.526):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.539):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.539):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.539):datavg[updatefs:632] : Mount point in /etc/filesystems for boardlv +epprd_rg:clvaryonvg(2.541):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/boardlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.543):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.545):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.548):datavg[updatefs:634] fs_mount_point=/board_org +epprd_rg:clvaryonvg(2.548):datavg[updatefs:637] : CuAt label attribute for boardlv +epprd_rg:clvaryonvg(2.548):datavg[updatefs:639] clodmget -q 'name = boardlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.551):datavg[updatefs:639] CuAt_label=/board_org +epprd_rg:clvaryonvg(2.553):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.554):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.557):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.557):datavg[updatefs:657] [[ -z /board_org ]] +epprd_rg:clvaryonvg(2.557):datavg[updatefs:657] [[ /board_org == None ]] +epprd_rg:clvaryonvg(2.557):datavg[updatefs:665] [[ /board_org == /board_org ]] +epprd_rg:clvaryonvg(2.557):datavg[updatefs:665] [[ /board_org != /board_org ]] +epprd_rg:clvaryonvg(2.558):datavg[updatefs:685] [[ /board_org != /board_org ]] +epprd_rg:clvaryonvg(2.558):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.558):datavg[updatefs:600] clodmget -q 'name = origlogAlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.561):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.561):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.561):datavg[updatefs:607] /usr/sbin/getlvcb -f origlogAlv +epprd_rg:clvaryonvg(2.578):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.578):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.578):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.578):datavg[updatefs:609] (( 0 != 0 
)) +epprd_rg:clvaryonvg(2.578):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.578):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.578):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.578):datavg[updatefs:623] : Label and file system type from LVCB on disk for origlogAlv +epprd_rg:clvaryonvg(2.579):datavg[updatefs:625] getlvcb -T -A origlogAlv +epprd_rg:clvaryonvg(2.580):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.583):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.586):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.588):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.600):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.600):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.600):datavg[updatefs:632] : Mount point in /etc/filesystems for origlogAlv +epprd_rg:clvaryonvg(2.602):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/origlogAlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.604):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.606):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.609):datavg[updatefs:634] fs_mount_point=/oracle/EPP/origlogA +epprd_rg:clvaryonvg(2.609):datavg[updatefs:637] : CuAt label attribute for origlogAlv +epprd_rg:clvaryonvg(2.609):datavg[updatefs:639] clodmget -q 'name = origlogAlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.613):datavg[updatefs:639] CuAt_label=/oracle/EPP/origlogA +epprd_rg:clvaryonvg(2.614):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.615):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.618):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.619):datavg[updatefs:657] [[ -z /oracle/EPP/origlogA ]] +epprd_rg:clvaryonvg(2.619):datavg[updatefs:657] [[ /oracle/EPP/origlogA == None ]] +epprd_rg:clvaryonvg(2.619):datavg[updatefs:665] [[ /oracle/EPP/origlogA == /oracle/EPP/origlogA ]] +epprd_rg:clvaryonvg(2.619):datavg[updatefs:665] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:clvaryonvg(2.619):datavg[updatefs:685] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:clvaryonvg(2.619):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.619):datavg[updatefs:600] clodmget -q 'name = origlogBlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.622):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.622):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.622):datavg[updatefs:607] /usr/sbin/getlvcb -f origlogBlv +epprd_rg:clvaryonvg(2.640):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.640):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.640):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.640):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.640):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.640):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.640):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.640):datavg[updatefs:623] : Label and file system 
type from LVCB on disk for origlogBlv +epprd_rg:clvaryonvg(2.641):datavg[updatefs:625] getlvcb -T -A origlogBlv +epprd_rg:clvaryonvg(2.641):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.644):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.647):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.649):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.662):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.662):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.662):datavg[updatefs:632] : Mount point in /etc/filesystems for origlogBlv +epprd_rg:clvaryonvg(2.663):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/origlogBlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.666):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.667):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.671):datavg[updatefs:634] fs_mount_point=/oracle/EPP/origlogB +epprd_rg:clvaryonvg(2.671):datavg[updatefs:637] : CuAt label attribute for origlogBlv +epprd_rg:clvaryonvg(2.671):datavg[updatefs:639] clodmget -q 'name = origlogBlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.674):datavg[updatefs:639] CuAt_label=/oracle/EPP/origlogB +epprd_rg:clvaryonvg(2.676):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.677):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.680):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.680):datavg[updatefs:657] [[ -z /oracle/EPP/origlogB ]] +epprd_rg:clvaryonvg(2.680):datavg[updatefs:657] [[ /oracle/EPP/origlogB == None ]] +epprd_rg:clvaryonvg(2.680):datavg[updatefs:665] [[ /oracle/EPP/origlogB == /oracle/EPP/origlogB ]] +epprd_rg:clvaryonvg(2.680):datavg[updatefs:665] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:clvaryonvg(2.680):datavg[updatefs:685] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:clvaryonvg(2.680):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.680):datavg[updatefs:600] clodmget -q 'name = mirrlogAlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.683):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.683):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.683):datavg[updatefs:607] /usr/sbin/getlvcb -f mirrlogAlv +epprd_rg:clvaryonvg(2.701):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.701):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.701):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.701):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.701):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.701):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.701):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.701):datavg[updatefs:623] : Label and file system type from LVCB on disk for mirrlogAlv +epprd_rg:clvaryonvg(2.702):datavg[updatefs:625] getlvcb -T -A mirrlogAlv +epprd_rg:clvaryonvg(2.702):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.705):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.708):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.710):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest 
+epprd_rg:clvaryonvg(2.723):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.723):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.723):datavg[updatefs:632] : Mount point in /etc/filesystems for mirrlogAlv +epprd_rg:clvaryonvg(2.725):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/mirrlogAlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.727):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.729):datavg[updatefs:634] cut -f1 -d: +epprd_rg:clvaryonvg(2.732):datavg[updatefs:634] fs_mount_point=/oracle/EPP/mirrlogA +epprd_rg:clvaryonvg(2.732):datavg[updatefs:637] : CuAt label attribute for mirrlogAlv +epprd_rg:clvaryonvg(2.732):datavg[updatefs:639] clodmget -q 'name = mirrlogAlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.735):datavg[updatefs:639] CuAt_label=/oracle/EPP/mirrlogA +epprd_rg:clvaryonvg(2.737):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.738):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.742):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.742):datavg[updatefs:657] [[ -z /oracle/EPP/mirrlogA ]] +epprd_rg:clvaryonvg(2.742):datavg[updatefs:657] [[ /oracle/EPP/mirrlogA == None ]] +epprd_rg:clvaryonvg(2.742):datavg[updatefs:665] [[ /oracle/EPP/mirrlogA == /oracle/EPP/mirrlogA ]] +epprd_rg:clvaryonvg(2.742):datavg[updatefs:665] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:clvaryonvg(2.742):datavg[updatefs:685] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:clvaryonvg(2.742):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.742):datavg[updatefs:600] clodmget -q 'name = mirrlogBlv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.745):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.745):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.745):datavg[updatefs:607] /usr/sbin/getlvcb -f mirrlogBlv +epprd_rg:clvaryonvg(2.763):datavg[updatefs:607] fs_info=vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' +epprd_rg:clvaryonvg(2.763):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.763):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.763):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.763):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.763):datavg[updatefs:618] [[ -z vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' ]] +epprd_rg:clvaryonvg(2.763):datavg[updatefs:618] [[ vfs='jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.763):datavg[updatefs:623] : Label and file system type from LVCB on disk for mirrlogBlv +epprd_rg:clvaryonvg(2.764):datavg[updatefs:625] getlvcb -T -A mirrlogBlv +epprd_rg:clvaryonvg(2.764):datavg[updatefs:625] LC_ALL=C +epprd_rg:clvaryonvg(2.768):datavg[updatefs:625] egrep -w 'label =|type =' +epprd_rg:clvaryonvg(2.771):datavg[updatefs:625] paste -s - - +epprd_rg:clvaryonvg(2.773):datavg[updatefs:625] read skip skip lvcb_label skip skip lvcb_type rest +epprd_rg:clvaryonvg(2.785):datavg[updatefs:626] [[ jfs2 != jfs ]] +epprd_rg:clvaryonvg(2.785):datavg[updatefs:626] [[ jfs2 != jfs2 ]] +epprd_rg:clvaryonvg(2.785):datavg[updatefs:632] : Mount point in /etc/filesystems for mirrlogBlv +epprd_rg:clvaryonvg(2.786):datavg[updatefs:634] egrep -p '^([[:space:]])*dev([[:space:]])*= /dev/mirrlogBlv$' /etc/filesystems +epprd_rg:clvaryonvg(2.789):datavg[updatefs:634] cut -f1 -d: 
+epprd_rg:clvaryonvg(2.791):datavg[updatefs:634] head -1 +epprd_rg:clvaryonvg(2.794):datavg[updatefs:634] fs_mount_point=/oracle/EPP/mirrlogB +epprd_rg:clvaryonvg(2.794):datavg[updatefs:637] : CuAt label attribute for mirrlogBlv +epprd_rg:clvaryonvg(2.794):datavg[updatefs:639] clodmget -q 'name = mirrlogBlv and attribute = label' -f value -n CuAt +epprd_rg:clvaryonvg(2.798):datavg[updatefs:639] CuAt_label=/oracle/EPP/mirrlogB +epprd_rg:clvaryonvg(2.799):datavg[updatefs:640] print -- CuAt_label +epprd_rg:clvaryonvg(2.800):datavg[updatefs:640] wc -l +epprd_rg:clvaryonvg(2.803):datavg[updatefs:640] (( 1 != 1 )) +epprd_rg:clvaryonvg(2.804):datavg[updatefs:657] [[ -z /oracle/EPP/mirrlogB ]] +epprd_rg:clvaryonvg(2.804):datavg[updatefs:657] [[ /oracle/EPP/mirrlogB == None ]] +epprd_rg:clvaryonvg(2.804):datavg[updatefs:665] [[ /oracle/EPP/mirrlogB == /oracle/EPP/mirrlogB ]] +epprd_rg:clvaryonvg(2.804):datavg[updatefs:665] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:clvaryonvg(2.804):datavg[updatefs:685] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:clvaryonvg(2.804):datavg[updatefs:598] : Skip filesystem update for raw logical volumes +epprd_rg:clvaryonvg(2.804):datavg[updatefs:600] clodmget -q 'name = epprdaloglv and attribute = type and value = raw' -f value -n CuAt +epprd_rg:clvaryonvg(2.807):datavg[updatefs:600] [[ -n '' ]] +epprd_rg:clvaryonvg(2.807):datavg[updatefs:605] : Skip logical volumes for which getlvcb fails +epprd_rg:clvaryonvg(2.807):datavg[updatefs:607] /usr/sbin/getlvcb -f epprdaloglv +epprd_rg:clvaryonvg(2.824):datavg[updatefs:607] fs_info=' ' +epprd_rg:clvaryonvg(2.824):datavg[updatefs:608] cmd_rc=0 +epprd_rg:clvaryonvg(2.824):datavg[updatefs:608] typeset -i cmd_rc +epprd_rg:clvaryonvg(2.824):datavg[updatefs:609] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.824):datavg[updatefs:615] : Skip logical volumes not associated with file systems +epprd_rg:clvaryonvg(2.824):datavg[updatefs:618] [[ -z ' ' ]] +epprd_rg:clvaryonvg(2.824):datavg[updatefs:618] [[ ' ' == *([[:space:]]) ]] +epprd_rg:clvaryonvg(2.825):datavg[updatefs:620] continue +epprd_rg:clvaryonvg(2.825):datavg[1641] : At this point, the volume should be varied on, so get the current +epprd_rg:clvaryonvg(2.825):datavg[1642] : timestamp if needed +epprd_rg:clvaryonvg(2.825):datavg[1644] vgdatimestamps +epprd_rg:clvaryonvg(2.825):datavg[vgdatimestamps:201] PS4_FUNC=vgdatimestamps +epprd_rg:clvaryonvg(2.825):datavg[vgdatimestamps:201] typeset PS4_FUNC +epprd_rg:clvaryonvg(2.825):datavg[vgdatimestamps:202] [[ high == high ]] +epprd_rg:clvaryonvg(2.825):datavg[vgdatimestamps:202] set -x +epprd_rg:clvaryonvg(2.825):datavg[vgdatimestamps:203] set -u +epprd_rg:clvaryonvg(2.825):datavg[vgdatimestamps:206] : See what timestamp LVM has recorded from the last time it checked +epprd_rg:clvaryonvg(2.825):datavg[vgdatimestamps:207] : the disks +epprd_rg:clvaryonvg(2.825):datavg[vgdatimestamps:209] /usr/sbin/getlvodm -T 00c44af100004b00000001851e9dc053 +epprd_rg:clvaryonvg(2.826):datavg[vgdatimestamps:209] 2> /dev/null +epprd_rg:clvaryonvg(2.828):datavg[vgdatimestamps:209] TS_FROM_ODM=6517168c00d56747 +epprd_rg:clvaryonvg(2.828):datavg[vgdatimestamps:212] : Check to see if HACMP is maintaining a timestamp for this volume group +epprd_rg:clvaryonvg(2.828):datavg[vgdatimestamps:213] : Needed for some older volume groups +epprd_rg:clvaryonvg(2.828):datavg[vgdatimestamps:215] [[ -s /usr/es/sbin/cluster/etc/vg/datavg.tstamp ]] +epprd_rg:clvaryonvg(2.828):datavg[vgdatimestamps:234] : Get the time stamp from the actual disk 
+epprd_rg:clvaryonvg(2.828):datavg[vgdatimestamps:236] clvgdats /dev/datavg +epprd_rg:clvaryonvg(2.829):datavg[vgdatimestamps:236] 2> /dev/null +epprd_rg:clvaryonvg(2.838):datavg[vgdatimestamps:236] TS_FROM_DISK=6517168c00d56747 +epprd_rg:clvaryonvg(2.838):datavg[vgdatimestamps:237] clvgdats_rc=0 +epprd_rg:clvaryonvg(2.838):datavg[vgdatimestamps:238] (( 0 != 0 )) +epprd_rg:clvaryonvg(2.838):datavg[vgdatimestamps:247] [[ -z 6517168c00d56747 ]] +epprd_rg:clvaryonvg(2.838):datavg[1645] [[ -z 6517168c00d56747 ]] +epprd_rg:clvaryonvg(2.838):datavg[1656] : Finally, leave the volume in the requested state - on or off +epprd_rg:clvaryonvg(2.838):datavg[1658] [[ FALSE == TRUE ]] +epprd_rg:clvaryonvg(2.839):datavg[1665] (( 0 == 0 )) +epprd_rg:clvaryonvg(2.839):datavg[1668] : Synchronize time stamps globally +epprd_rg:clvaryonvg(2.839):datavg[1670] cl_update_vg_odm_ts -o datavg +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[77] version=1.13 +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[121] o_flag='' +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[122] f_flag='' +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[123] getopts :of option +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[126] : Local timestamps should be good, since volume group was +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[127] : just varied on or off +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[128] o_flag=TRUE +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[123] getopts :of option +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[142] shift 1 +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[144] vg_name=datavg +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[145] [[ -z datavg ]] +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[151] shift +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[152] node_list='' +epprd_rg:cl_update_vg_odm_ts(0.001):datavg[153] /usr/es/sbin/cluster/utilities/cl_get_path all +epprd_rg:cl_update_vg_odm_ts(0.004):datavg[153] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin +epprd_rg:cl_update_vg_odm_ts(0.004):datavg[155] [[ -z '' ]] +epprd_rg:cl_update_vg_odm_ts(0.004):datavg[158] : Check to see if this update is necessary - some LVM levels automatically +epprd_rg:cl_update_vg_odm_ts(0.004):datavg[159] : update volume group timestamps clusterwide.
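vgdatimestamps, traced above, is a two-source comparison: getlvodm -T reports the VGDA timestamp that LVM last cached in the ODM, and the PowerHA helper clvgdats reads the one actually on disk; if they differ, the local ODM copy of the volume group is stale and must be refreshed before relying on it. A hedged ksh sketch of that check, using datavg as the example; the getlvodm -v call to map the VG name to its VGID is an assumption here, since the trace already had the VGID in hand:

    VG=datavg
    VGID=$(getlvodm -v $VG)                        # VG name -> VGID (assumed mapping)
    TS_FROM_ODM=$(getlvodm -T $VGID 2>/dev/null)   # timestamp LVM recorded in the ODM
    TS_FROM_DISK=$(clvgdats /dev/$VG 2>/dev/null)  # timestamp in the on-disk VGDA
    if [[ $TS_FROM_ODM != "$TS_FROM_DISK" ]]; then
        print -u2 "ODM definition of $VG is stale: $TS_FROM_ODM vs $TS_FROM_DISK"
    fi

In this run both sides returned 6517168c00d56747, so no resynchronization was needed.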
+epprd_rg:cl_update_vg_odm_ts(0.004):datavg[163] instfix -iqk IV74100 +epprd_rg:cl_update_vg_odm_ts(0.005):datavg[163] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.019):datavg[164] instfix -iqk IV74883 +epprd_rg:cl_update_vg_odm_ts(0.019):datavg[164] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.026):datavg[165] instfix -iqk IV74698 +epprd_rg:cl_update_vg_odm_ts(0.027):datavg[165] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.034):datavg[166] instfix -iqk IV74246 +epprd_rg:cl_update_vg_odm_ts(0.035):datavg[166] 1> /dev/null 2>& 1 +epprd_rg:cl_update_vg_odm_ts(0.041):datavg[174] emgr -l -L IV74883 +epprd_rg:cl_update_vg_odm_ts(0.042):datavg[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.313):datavg[174] emgr -l -L IV74698 +epprd_rg:cl_update_vg_odm_ts(0.314):datavg[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.581):datavg[174] emgr -l -L IV74246 +epprd_rg:cl_update_vg_odm_ts(0.582):datavg[174] 2> /dev/null +epprd_rg:cl_update_vg_odm_ts(0.849):datavg[183] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_update_vg_odm_ts(0.849):datavg[184] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_update_vg_odm_ts(0.849):datavg[185] : 99.99.999.999 +epprd_rg:cl_update_vg_odm_ts(0.849):datavg[187] typeset -li V R M F +epprd_rg:cl_update_vg_odm_ts(0.849):datavg[188] typeset -Z2 V +epprd_rg:cl_update_vg_odm_ts(0.849):datavg[189] typeset -Z2 R +epprd_rg:cl_update_vg_odm_ts(0.849):datavg[190] typeset -Z3 M +epprd_rg:cl_update_vg_odm_ts(0.849):datavg[191] typeset -Z3 F +epprd_rg:cl_update_vg_odm_ts(0.849):datavg[192] lvm_lvl6=601008015 +epprd_rg:cl_update_vg_odm_ts(0.849):datavg[192] typeset -li lvm_lvl6 +epprd_rg:cl_update_vg_odm_ts(0.849):datavg[194] lvm_lvl7=701003046 +epprd_rg:cl_update_vg_odm_ts(0.849):datavg[194] typeset -li lvm_lvl7 +epprd_rg:cl_update_vg_odm_ts(0.849):datavg[195] VRMF=0 +epprd_rg:cl_update_vg_odm_ts(0.849):datavg[195] typeset -li VRMF +epprd_rg:cl_update_vg_odm_ts(0.849):datavg[198] : Here try and figure out what level of LVM is installed +epprd_rg:cl_update_vg_odm_ts(0.850):datavg[200] lslpp -lcqOr bos.rte.lvm +epprd_rg:cl_update_vg_odm_ts(0.851):datavg[200] cut -f3 -d: +epprd_rg:cl_update_vg_odm_ts(0.851):datavg[200] read V R M F +epprd_rg:cl_update_vg_odm_ts(0.851):datavg[200] IFS=. +epprd_rg:cl_update_vg_odm_ts(0.852):datavg[201] VRMF=0702005101 +epprd_rg:cl_update_vg_odm_ts(0.852):datavg[203] (( 7 == 6 && 702005101 >= 601008015 )) +epprd_rg:cl_update_vg_odm_ts(0.853):datavg[204] (( 702005101 >= 701003046 )) +epprd_rg:cl_update_vg_odm_ts(0.853):datavg[207] : LVM at a level in which timestamp update is unnecessary +epprd_rg:cl_update_vg_odm_ts(0.853):datavg[209] return 0 +epprd_rg:clvaryonvg(3.696):datavg[1674] : On successful varyon, clean up any files used to track errors with +epprd_rg:clvaryonvg(3.696):datavg[1675] : this volume group +epprd_rg:clvaryonvg(3.696):datavg[1677] rm -f /usr/es/sbin/cluster/etc/vg/datavg.desc /usr/es/sbin/cluster/etc/vg/datavg.replay /usr/es/sbin/cluster/etc/vg/datavg.perms /usr/es/sbin/cluster/etc/vg/datavg.tstamp /usr/es/sbin/cluster/etc/vg/datavg.fail +epprd_rg:clvaryonvg(3.699):datavg[1680] : Note that a sync has not been done on the volume group at this point. +epprd_rg:clvaryonvg(3.699):datavg[1681] : A sync is kicked off in cl_sync_vgs, once any filesystem mounts are +epprd_rg:clvaryonvg(3.699):datavg[1682] : complete. 
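cl_update_vg_odm_ts only propagates timestamps itself on LVM levels that do not already do so clusterwide, which is what the instfix/emgr probes and the version compare above decide. The comparison trick is worth noting: each of the V, R, M and F fields of the bos.rte.lvm level is zero-padded to a fixed width so the concatenation can be compared as a single integer. A ksh93 sketch of that padding, mirroring the traced typeset sequence (701003046 is the lvm_lvl7 threshold from the trace):

    typeset -li V R M F VRMF
    typeset -Z2 V R                     # pad version and release to 2 digits
    typeset -Z3 M F                     # pad modification and fix to 3 digits
    lslpp -lcqOr bos.rte.lvm | cut -f3 -d: | IFS=. read V R M F
    VRMF=${V}${R}${M}${F}               # e.g. 7.2.5.101 -> 0702005101
    (( VRMF >= 701003046 )) &&
        print "LVM already updates VG timestamps clusterwide; nothing to do"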
A sync at this time would interfere with the mounts +epprd_rg:clvaryonvg(3.699):datavg[1685] return 0 +epprd_rg:cl_activate_vgs(3.785):datavg[vgs_chk:103] ERRMSG=$'cl_set_vg_fence_height[126]: version @(#)10\t1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37\ncl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY)\ncl_set_vg_fence_height[214]: read(datavg, 16)\ncl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)\ncl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0))' +epprd_rg:cl_activate_vgs(3.785):datavg[vgs_chk:104] RC=0 +epprd_rg:cl_activate_vgs(3.785):datavg[vgs_chk:107] (( 0 == 1 || 0 == 20 )) +epprd_rg:cl_activate_vgs(3.785):datavg[vgs_chk:115] : exit status of clvaryonvg -n datavg: 0 +epprd_rg:cl_activate_vgs(3.785):datavg[vgs_chk:117] [[ -n $'cl_set_vg_fence_height[126]: version @(#)10\t1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37\ncl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY)\ncl_set_vg_fence_height[214]: read(datavg, 16)\ncl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)\ncl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0))' ]] +epprd_rg:cl_activate_vgs(3.785):datavg[vgs_chk:117] (( 0 != 1 )) +epprd_rg:cl_activate_vgs(3.785):datavg[vgs_chk:119] cl_echo 286 $'cl_activate_vgs: Successful clvaryonvg of datavg with message cl_set_vg_fence_height[126]: version @(#)10\t1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37\ncl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY)\ncl_set_vg_fence_height[214]: read(datavg, 16)\ncl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)\ncl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0)).' 
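Note the pattern in the cl_activate_vgs wrapper above: everything clvaryonvg writes to stderr is captured in ERRMSG, and because the exit status is 0 the captured cl_set_vg_fence_height chatter is folded into a success message instead of being treated as an error. A small ksh sketch of that convention, with the names taken from the trace:

    VG=datavg
    ERRMSG=$(clvaryonvg -n $VG 2>&1 >/dev/null)   # keep stderr, discard stdout
    RC=$?
    if (( RC != 0 )); then
        print -u2 "cl_activate_vgs: error on clvaryonvg of $VG: rc=$RC $ERRMSG"
    elif [[ -n $ERRMSG ]]; then
        print "cl_activate_vgs: Successful clvaryonvg of $VG with message $ERRMSG"
    fi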
cl_activate_vgs datavg 'cl_set_vg_fence_height[126]:' version '@(#)10' 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 'cl_set_vg_fence_height[180]:' 'open(/usr/es/sbin/cluster/etc/vg/datavg.uuid,' 'O_RDONLY)' 'cl_set_vg_fence_height[214]:' 'read(datavg,' '16)' 'cl_set_vg_fence_height[237]:' 'close(/usr/es/sbin/cluster/etc/vg/datavg.uuid)' 'cl_set_vg_fence_height[265]:' 'sfwSetFenceGroup(vg=datavg' uuid=ec2db4422261eae02091227fb9e53c88 height='rw(0))' Sep 30 2023 03:25:19cl_activate_vgs: Successful clvaryonvg of datavg with message cl_set_vg_fence_height[126]: version @(#)10 1.5 src/43haes/usr/sbin/cluster/events/utils/cl_set_vg_fence_height.c, hacmp, 61haes_r714 4/12/13 13:18:37 cl_set_vg_fence_height[180]: open(/usr/es/sbin/cluster/etc/vg/datavg.uuid, O_RDONLY) cl_set_vg_fence_height[214]: read(datavg, 16) cl_set_vg_fence_height[237]: close(/usr/es/sbin/cluster/etc/vg/datavg.uuid) cl_set_vg_fence_height[265]: sfwSetFenceGroup(vg=datavg uuid=ec2db4422261eae02091227fb9e53c88 height=rw(0)).+epprd_rg:cl_activate_vgs(3.804):datavg[vgs_chk:123] [[ 0 != 0 ]] +epprd_rg:cl_activate_vgs(3.804):datavg[vgs_chk:127] amlog_trace '' 'Activating Volume Group|datavg' +epprd_rg:cl_activate_vgs(3.804):datavg[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_vgs(3.805):datavg[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_vgs(3.830):datavg[amlog_trace:319] cltime +epprd_rg:cl_activate_vgs(3.832):datavg[amlog_trace:319] DATE=2023-09-30T03:25:19.553838 +epprd_rg:cl_activate_vgs(3.832):datavg[amlog_trace:320] echo '|2023-09-30T03:25:19.553838|INFO: Activating Volume Group|datavg' +epprd_rg:cl_activate_vgs(3.833):datavg[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_vgs(3.833):datavg[vgs_chk:132] echo datavg 0 +epprd_rg:cl_activate_vgs(3.833):datavg[vgs_chk:132] 1>> /tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs(3.833):datavg[vgs_chk:133] return 0 +epprd_rg:cl_activate_vgs:datavg[vgs_list:198] unset PS4_LOOP PS4_TIMER +epprd_rg:cl_activate_vgs[304] wait +epprd_rg:cl_activate_vgs[310] ALLNOERRVGS=All_nonerror_volume_groups +epprd_rg:cl_activate_vgs[311] cl_RMupdate resource_up All_nonerror_volume_groups cl_activate_vgs 2023-09-30T03:25:19.577016 2023-09-30T03:25:19.581584 +epprd_rg:cl_activate_vgs[318] [[ -f /tmp/_activate_vgs.tmp ]] +epprd_rg:cl_activate_vgs[320] grep ' 1' /tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs[329] rm -f /tmp/_activate_vgs.tmp +epprd_rg:cl_activate_vgs[332] exit 0 +epprd_rg:process_resources[process_volume_groups:2584] RC=0 +epprd_rg:process_resources[process_volume_groups:2585] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[process_volume_groups:2598] (( 0 != 0 )) +epprd_rg:process_resources[process_volume_groups:2627] return 0 +epprd_rg:process_resources[process_volume_groups_main:2556] STAT=0 +epprd_rg:process_resources[process_volume_groups_main:2559] return 0 +epprd_rg:process_resources[3572] RC=0 +epprd_rg:process_resources[3573] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-09-30T03:25:19.600332 clrgpa +epprd_rg:clRGPA[+55] exit 0 
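Alongside hacmp.out, each resource action is appended to /var/hacmp/availability/clavailability.log as a timestamped, pipe-delimited record; that is what the amlog_trace lines above do, after rotating the log with clcycle. A sketch of the record format, using the PowerHA cltime helper exactly as traced:

    DATE=$(cltime)                      # e.g. 2023-09-30T03:25:19.553838
    print "|$DATE|INFO: Activating Volume Group|datavg" \
        >> /var/hacmp/availability/clavailability.log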
+epprd_rg:process_resources[3329] eval JOB_TYPE=LOGREDO ACTION=ACQUIRE VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' +epprd_rg:process_resources[1] JOB_TYPE=LOGREDO +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] VOLUME_GROUPS=datavg +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ LOGREDO == RELEASE ]] +epprd_rg:process_resources[3360] [[ LOGREDO == ONLINE ]] +epprd_rg:process_resources[3634] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[3635] logredo_volume_groups +epprd_rg:process_resources[logredo_volume_groups:2745] PS4_FUNC=logredo_volume_groups +epprd_rg:process_resources[logredo_volume_groups:2745] typeset PS4_FUNC +epprd_rg:process_resources(4.783)[logredo_volume_groups:2746] PS4_TIMER=true +epprd_rg:process_resources(4.784)[logredo_volume_groups:2746] typeset PS4_TIMER +epprd_rg:process_resources(4.784)[logredo_volume_groups:2747] [[ high == high ]] +epprd_rg:process_resources(4.784)[logredo_volume_groups:2747] set -x +epprd_rg:process_resources(4.784)[logredo_volume_groups:2749] TMP_FILE=/var/hacmp/log/.process_resources_logredo.19923274 +epprd_rg:process_resources(4.784)[logredo_volume_groups:2749] export TMP_FILE +epprd_rg:process_resources(4.784)[logredo_volume_groups:2750] rm -f '/var/hacmp/log/.process_resources_logredo*' +epprd_rg:process_resources(4.787)[logredo_volume_groups:2752] STAT=0 +epprd_rg:process_resources(4.787)[logredo_volume_groups:2755] export GROUPNAME +epprd_rg:process_resources(4.788)[logredo_volume_groups:2757] get_list_head datavg +epprd_rg:process_resources(4.788)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(4.788)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(4.788)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(4.788)[get_list_head:60] set -x +epprd_rg:process_resources(4.789)[get_list_head:61] echo datavg +epprd_rg:process_resources(4.789)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(4.789)[get_list_head:61] IFS=: +epprd_rg:process_resources(4.790)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(4.790)[get_list_head:62] echo datavg +epprd_rg:process_resources(4.788)[logredo_volume_groups:2757] read LIST_OF_VOLUME_GROUPS_FOR_RG +epprd_rg:process_resources(4.794)[logredo_volume_groups:2758] get_list_tail datavg +epprd_rg:process_resources(4.794)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(4.794)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(4.794)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(4.795)[get_list_tail:68] set -x +epprd_rg:process_resources(4.796)[get_list_tail:69] echo datavg +epprd_rg:process_resources(4.795)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(4.796)[get_list_tail:69] IFS=: +epprd_rg:process_resources(4.796)[get_list_tail:70] echo +epprd_rg:process_resources(4.793)[logredo_volume_groups:2758] read VOLUME_GROUPS +epprd_rg:process_resources(4.796)[logredo_volume_groups:2761] : Run logredo on all JFS/JFS2 log devices to assure FS consistency 
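The driving loop of process_resources is visible above: each call to clRGPA prints KEY=VALUE assignments describing the next job, and the script evals them with allexport (set -a) in effect, so every variable lands in the environment of whatever runs next. A sketch of that handshake with the values from this pass; in the real script the assignments come from the clRGPA output rather than a literal:

    set -a      # allexport: the eval'd assignments become exported variables
    eval 'JOB_TYPE=LOGREDO ACTION=ACQUIRE VOLUME_GROUPS="datavg" RESOURCE_GROUPS="epprd_rg "'
    set +a
    print "next job: $JOB_TYPE/$ACTION for $RESOURCE_GROUPS ($VOLUME_GROUPS)"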
+epprd_rg:process_resources(4.796)[logredo_volume_groups:2763] ALL_LVs='' +epprd_rg:process_resources(4.796)[logredo_volume_groups:2764] lv_all='' +epprd_rg:process_resources(4.796)[logredo_volume_groups:2765] mount_fs='' +epprd_rg:process_resources(4.796)[logredo_volume_groups:2766] fsck_check='' +epprd_rg:process_resources(4.796)[logredo_volume_groups:2767] MOUNTGUARD='' +epprd_rg:process_resources(4.796)[logredo_volume_groups:2768] FMMOUNT_OUT='' +epprd_rg:process_resources(4.796)[logredo_volume_groups:2769] FMMOUNT='' +epprd_rg:process_resources(4.798)[logredo_volume_groups:2772] tail +3 +epprd_rg:process_resources(4.798)[logredo_volume_groups:2772] lsvg -lL datavg +epprd_rg:process_resources(4.798)[logredo_volume_groups:2772] LC_ALL=C +epprd_rg:process_resources(4.799)[logredo_volume_groups:2772] 1>> /var/hacmp/log/.process_resources_logredo.19923274 +epprd_rg:process_resources(4.822)[logredo_volume_groups:2774] awk '{print $1}' +epprd_rg:process_resources(4.822)[logredo_volume_groups:2774] cat /var/hacmp/log/.process_resources_logredo.19923274 +epprd_rg:process_resources(4.827)[logredo_volume_groups:2774] ALL_LVs=$'epprdaloglv\nsaplv\nsapmntlv\noraclelv\nepplv\noraarchlv\nsapdata1lv\nsapdata2lv\nsapdata3lv\nsapdata4lv\nboardlv\noriglogAlv\noriglogBlv\nmirrlogAlv\nmirrlogBlv' +epprd_rg:process_resources(4.827)[logredo_volume_groups:2777] : Verify if any of the file systems associated with volume group datavg +epprd_rg:process_resources(4.827)[logredo_volume_groups:2778] : is already mounted anywhere else in the cluster. +epprd_rg:process_resources(4.827)[logredo_volume_groups:2779] : If it is already mounted somewhere else, we don't want to continue +epprd_rg:process_resources(4.827)[logredo_volume_groups:2780] : here to avoid data corruption. +epprd_rg:process_resources(4.829)[logredo_volume_groups:2782] awk '{print $1}' +epprd_rg:process_resources(4.829)[logredo_volume_groups:2782] cat /var/hacmp/log/.process_resources_logredo.19923274 +epprd_rg:process_resources(4.829)[logredo_volume_groups:2782] grep -v N/A +epprd_rg:process_resources(4.835)[logredo_volume_groups:2782] lv_all=$'saplv\nsapmntlv\noraclelv\nepplv\noraarchlv\nsapdata1lv\nsapdata2lv\nsapdata3lv\nsapdata4lv\nboardlv\noriglogAlv\noriglogBlv\nmirrlogAlv\nmirrlogBlv' +epprd_rg:process_resources(4.835)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.835)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.837)[logredo_volume_groups:2789] lsfs -qc saplv +epprd_rg:process_resources(4.837)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.837)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.838)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/saplv' was found in /etc/filesystems. +epprd_rg:process_resources(4.839)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.843)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.843)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.843)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node.
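Before logredo is allowed to touch a log device, each file system is probed two ways for a concurrent mount: lsfs -q reports whether MountGuard is set, and the FM_MOUNT state flag is read from the superblock with the fsdb subcommands su and q. A ksh sketch of one probe, with saplv as the example LV; note the trace passes the bare LV name, which lsfs resolves relative to the working directory (hence the harmless "No record matching '/var/hacmp/...'" warnings), while the sketch uses the /dev path:

    LV=saplv                                         # example LV from the trace
    # MountGuard attribute, if the file system advertises one
    MOUNTGUARD=$(LC_ALL=C lsfs -qc /dev/$LV 2>/dev/null | tr : '\n' |
                 grep -w MountGuard | cut -d' ' -f2)
    # FM_MOUNT superblock flag: set if the FS is mounted cleanly on any node
    FMMOUNT_OUT=$(print 'su\nq' | fsdb /dev/$LV)     # su = show superblock, q = quit
    FMMOUNT=$(print -- "$FMMOUNT_OUT" | grep -w FM_MOUNT | awk '{ print $1 }')
    if [[ $FMMOUNT == yes ]]; then
        print -u2 "$LV is mounted cleanly elsewhere; skipping logredo to avoid corruption"
    fi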
+epprd_rg:process_resources(4.843)[logredo_volume_groups:2795] fsdb saplv +epprd_rg:process_resources(4.844)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.850)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.852)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.852)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.852)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.857)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.857)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.857)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.857)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.857)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.860)[logredo_volume_groups:2789] lsfs -qc sapmntlv +epprd_rg:process_resources(4.860)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.860)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.861)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapmntlv' was found in /etc/filesystems. +epprd_rg:process_resources(4.862)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.866)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.866)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.866)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.866)[logredo_volume_groups:2795] fsdb sapmntlv +epprd_rg:process_resources(4.867)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.870)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.872)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.873)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.873)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.878)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.878)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.878)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.878)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.878)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.880)[logredo_volume_groups:2789] lsfs -qc oraclelv +epprd_rg:process_resources(4.880)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.881)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.882)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/oraclelv' was found in /etc/filesystems. +epprd_rg:process_resources(4.883)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.887)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.887)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:process_resources(4.887)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.887)[logredo_volume_groups:2795] fsdb oraclelv +epprd_rg:process_resources(4.888)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.891)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.893)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.894)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.894)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.899)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.899)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.899)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.899)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.899)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.901)[logredo_volume_groups:2789] lsfs -qc epplv +epprd_rg:process_resources(4.901)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.902)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.902)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/epplv' was found in /etc/filesystems. +epprd_rg:process_resources(4.903)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.907)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.908)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.908)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.908)[logredo_volume_groups:2795] fsdb epplv +epprd_rg:process_resources(4.909)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.912)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.914)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.915)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.915)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.920)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.920)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.920)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.920)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.920)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.922)[logredo_volume_groups:2789] lsfs -qc oraarchlv +epprd_rg:process_resources(4.922)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.923)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.923)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/oraarchlv' was found in /etc/filesystems. 
+epprd_rg:process_resources(4.924)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.928)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.928)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.928)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.928)[logredo_volume_groups:2795] fsdb oraarchlv +epprd_rg:process_resources(4.930)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.933)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.935)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.935)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.935)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.940)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.941)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.941)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.941)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.941)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.943)[logredo_volume_groups:2789] lsfs -qc sapdata1lv +epprd_rg:process_resources(4.943)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.943)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.944)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapdata1lv' was found in /etc/filesystems. +epprd_rg:process_resources(4.945)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.949)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.949)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.949)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.949)[logredo_volume_groups:2795] fsdb sapdata1lv +epprd_rg:process_resources(4.950)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.954)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.956)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.956)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.956)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.961)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.962)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.962)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.962)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.962)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
+epprd_rg:process_resources(4.964)[logredo_volume_groups:2789] lsfs -qc sapdata2lv +epprd_rg:process_resources(4.964)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.964)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.965)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapdata2lv' was found in /etc/filesystems. +epprd_rg:process_resources(4.966)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.970)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.970)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.970)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(4.970)[logredo_volume_groups:2795] fsdb sapdata2lv +epprd_rg:process_resources(4.971)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.974)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.977)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.977)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.977)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(4.982)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(4.982)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(4.982)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(4.982)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(4.982)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(4.984)[logredo_volume_groups:2789] lsfs -qc sapdata3lv +epprd_rg:process_resources(4.985)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(4.985)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(4.986)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapdata3lv' was found in /etc/filesystems. +epprd_rg:process_resources(4.987)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(4.991)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(4.991)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(4.991)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
+epprd_rg:process_resources(4.991)[logredo_volume_groups:2795] fsdb sapdata3lv +epprd_rg:process_resources(4.992)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(4.995)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(4.997)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(4.998)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(4.998)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.003)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.003)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.003)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.003)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.003)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.005)[logredo_volume_groups:2789] lsfs -qc sapdata4lv +epprd_rg:process_resources(5.005)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.006)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.006)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/sapdata4lv' was found in /etc/filesystems. +epprd_rg:process_resources(5.008)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.012)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.012)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.012)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.012)[logredo_volume_groups:2795] fsdb sapdata4lv +epprd_rg:process_resources(5.013)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.016)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.018)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.019)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.019)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.024)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.024)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.024)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.024)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.024)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.026)[logredo_volume_groups:2789] lsfs -qc boardlv +epprd_rg:process_resources(5.026)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.027)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.027)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/boardlv' was found in /etc/filesystems. +epprd_rg:process_resources(5.028)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.032)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.032)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:process_resources(5.032)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.032)[logredo_volume_groups:2795] fsdb boardlv +epprd_rg:process_resources(5.033)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.037)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.039)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.039)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.039)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.044)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.044)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.044)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.044)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.045)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.047)[logredo_volume_groups:2789] lsfs -qc origlogAlv +epprd_rg:process_resources(5.047)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.047)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.048)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/origlogAlv' was found in /etc/filesystems. +epprd_rg:process_resources(5.049)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.053)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.053)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.053)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.053)[logredo_volume_groups:2795] fsdb origlogAlv +epprd_rg:process_resources(5.054)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.058)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.060)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.060)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.060)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.065)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.065)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.065)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.066)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.066)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.067)[logredo_volume_groups:2789] lsfs -qc origlogBlv +epprd_rg:process_resources(5.068)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.068)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.069)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/origlogBlv' was found in /etc/filesystems. 
+epprd_rg:process_resources(5.070)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.074)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.074)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.074)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.074)[logredo_volume_groups:2795] fsdb origlogBlv +epprd_rg:process_resources(5.075)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.078)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.080)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.081)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.081)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.086)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.086)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.086)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.086)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.086)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:process_resources(5.088)[logredo_volume_groups:2789] lsfs -qc mirrlogAlv +epprd_rg:process_resources(5.088)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.089)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.089)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/mirrlogAlv' was found in /etc/filesystems. +epprd_rg:process_resources(5.091)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.095)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.095)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.095)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.095)[logredo_volume_groups:2795] fsdb mirrlogAlv +epprd_rg:process_resources(5.096)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.099)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.101)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.102)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.102)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.107)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.107)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.107)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.107)[logredo_volume_groups:2786] : When a filesystem is protected against concurrent mounting, +epprd_rg:process_resources(5.107)[logredo_volume_groups:2787] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
+epprd_rg:process_resources(5.109)[logredo_volume_groups:2789] lsfs -qc mirrlogBlv +epprd_rg:process_resources(5.109)[logredo_volume_groups:2789] LC_ALL=C +epprd_rg:process_resources(5.110)[logredo_volume_groups:2789] tr : '\n' +epprd_rg:process_resources(5.110)[logredo_volume_groups:2789] grep -w MountGuard lsfs: No record matching '/var/hacmp/mirrlogBlv' was found in /etc/filesystems. +epprd_rg:process_resources(5.111)[logredo_volume_groups:2789] cut '-d ' -f2 +epprd_rg:process_resources(5.115)[logredo_volume_groups:2789] MOUNTGUARD='' +epprd_rg:process_resources(5.115)[logredo_volume_groups:2792] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:process_resources(5.115)[logredo_volume_groups:2793] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:process_resources(5.115)[logredo_volume_groups:2795] fsdb mirrlogBlv +epprd_rg:process_resources(5.116)[logredo_volume_groups:2795] 0<< \EOF su q EOF +epprd_rg:process_resources(5.120)[logredo_volume_groups:2795] FMMOUNT_OUT='' +epprd_rg:process_resources(5.122)[logredo_volume_groups:2799] echo '' +epprd_rg:process_resources(5.122)[logredo_volume_groups:2799] awk '{ print $1 }' +epprd_rg:process_resources(5.122)[logredo_volume_groups:2799] grep -w FM_MOUNT +epprd_rg:process_resources(5.128)[logredo_volume_groups:2799] FMMOUNT='' +epprd_rg:process_resources(5.128)[logredo_volume_groups:2800] [[ '' == yes ]] +epprd_rg:process_resources(5.128)[logredo_volume_groups:2804] [[ -n '' ]] +epprd_rg:process_resources(5.128)[logredo_volume_groups:2814] comm_failure='' +epprd_rg:process_resources(5.128)[logredo_volume_groups:2815] rc_mount='' +epprd_rg:process_resources(5.128)[logredo_volume_groups:2816] [[ -n '' ]] +epprd_rg:process_resources(5.128)[logredo_volume_groups:2851] logdevs='' +epprd_rg:process_resources(5.128)[logredo_volume_groups:2852] HAVE_GEO='' +epprd_rg:process_resources(5.128)[logredo_volume_groups:2853] lslpp -l 'hageo.*' +epprd_rg:process_resources(5.129)[logredo_volume_groups:2853] 1> /dev/null 2>& 1 +epprd_rg:process_resources(5.132)[logredo_volume_groups:2854] lslpp -l 'geoRM.*' +epprd_rg:process_resources(5.133)[logredo_volume_groups:2854] 1> /dev/null 2>& 1 +epprd_rg:process_resources(5.136)[logredo_volume_groups:2874] pattern='jfs*log' +epprd_rg:process_resources(5.136)[logredo_volume_groups:2876] : Any device with the type as log should be added +epprd_rg:process_resources(5.136)[logredo_volume_groups:2882] odmget -q $'name = epprdaloglv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.139)[logredo_volume_groups:2882] [[ -n $'\nCuAt:\n\tname = "epprdaloglv"\n\tattribute = "type"\n\tvalue = "jfs2log"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.139)[logredo_volume_groups:2884] logdevs=' /dev/epprdaloglv' +epprd_rg:process_resources(5.139)[logredo_volume_groups:2882] odmget -q $'name = saplv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.143)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.143)[logredo_volume_groups:2882] odmget -q $'name = sapmntlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.146)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.146)[logredo_volume_groups:2882] odmget -q $'name = oraclelv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.150)[logredo_volume_groups:2882] [[ -n '' ]]
+epprd_rg:process_resources(5.150)[logredo_volume_groups:2882] odmget -q $'name = epplv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.153)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.153)[logredo_volume_groups:2882] odmget -q $'name = oraarchlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.157)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.157)[logredo_volume_groups:2882] odmget -q $'name = sapdata1lv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.160)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.160)[logredo_volume_groups:2882] odmget -q $'name = sapdata2lv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.164)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.164)[logredo_volume_groups:2882] odmget -q $'name = sapdata3lv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.168)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.168)[logredo_volume_groups:2882] odmget -q $'name = sapdata4lv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.171)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.171)[logredo_volume_groups:2882] odmget -q $'name = boardlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.175)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.175)[logredo_volume_groups:2882] odmget -q $'name = origlogAlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.178)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.178)[logredo_volume_groups:2882] odmget -q $'name = origlogBlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.182)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.182)[logredo_volume_groups:2882] odmget -q $'name = mirrlogAlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.185)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.185)[logredo_volume_groups:2882] odmget -q $'name = mirrlogBlv and \t\t attribute = type and \t\t value like jfs*log' CuAt +epprd_rg:process_resources(5.189)[logredo_volume_groups:2882] [[ -n '' ]] +epprd_rg:process_resources(5.189)[logredo_volume_groups:2889] : JFS2 file systems can have inline logs where the log LV is the same as the FS LV.
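The loop above builds the list of dedicated log devices by asking the ODM, for every LV in the group, whether its type attribute matches jfs*log; in this cluster only epprdaloglv qualifies. A sketch of that discovery, with the LV list shortened for illustration:

    logdevs=''
    for lv in epprdaloglv saplv sapmntlv; do         # shortened example list
        # an LV whose ODM type is jfslog/jfs2log is a log device
        if [[ -n $(odmget -q "name = $lv and attribute = type and value like jfs*log" CuAt) ]]
        then
            logdevs="$logdevs /dev/$lv"
        fi
    done
    print "log devices to logredo:$logdevs"

Inline JFS2 logs, introduced in the comment above, are the other case; the second loop that follows handles them.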
+epprd_rg:process_resources(5.189)[logredo_volume_groups:2895] odmget $'-qname = epprdaloglv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.192)[logredo_volume_groups:2895] [[ -n '' ]] +epprd_rg:process_resources(5.192)[logredo_volume_groups:2895] odmget $'-qname = saplv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.196)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "saplv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.198)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.198)[logredo_volume_groups:2898] odmget -q 'name = saplv and attribute = label' CuAt +epprd_rg:process_resources(5.202)[logredo_volume_groups:2898] [[ -n /usr/sap ]] +epprd_rg:process_resources(5.204)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.204)[logredo_volume_groups:2900] grep -wp /dev/saplv /etc/filesystems +epprd_rg:process_resources(5.209)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.209)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.209)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/saplv ]] +epprd_rg:process_resources(5.209)[logredo_volume_groups:2895] odmget $'-qname = sapmntlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.213)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapmntlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.215)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.215)[logredo_volume_groups:2898] odmget -q 'name = sapmntlv and attribute = label' CuAt +epprd_rg:process_resources(5.219)[logredo_volume_groups:2898] [[ -n /sapmnt ]] +epprd_rg:process_resources(5.221)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.221)[logredo_volume_groups:2900] grep -wp /dev/sapmntlv /etc/filesystems +epprd_rg:process_resources(5.226)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.226)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.226)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapmntlv ]] +epprd_rg:process_resources(5.226)[logredo_volume_groups:2895] odmget $'-qname = oraclelv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.230)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "oraclelv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.232)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.232)[logredo_volume_groups:2898] odmget -q 'name = oraclelv and attribute = label' CuAt +epprd_rg:process_resources(5.236)[logredo_volume_groups:2898] [[ -n /oracle ]] +epprd_rg:process_resources(5.238)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.238)[logredo_volume_groups:2900] grep -wp /dev/oraclelv /etc/filesystems +epprd_rg:process_resources(5.243)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.243)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] 
+epprd_rg:process_resources(5.243)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/oraclelv ]] +epprd_rg:process_resources(5.244)[logredo_volume_groups:2895] odmget $'-qname = epplv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.247)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "epplv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.249)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.249)[logredo_volume_groups:2898] odmget -q 'name = epplv and attribute = label' CuAt +epprd_rg:process_resources(5.253)[logredo_volume_groups:2898] [[ -n /oracle/EPP ]] +epprd_rg:process_resources(5.255)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.255)[logredo_volume_groups:2900] grep -wp /dev/epplv /etc/filesystems +epprd_rg:process_resources(5.260)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.261)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.261)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/epplv ]] +epprd_rg:process_resources(5.261)[logredo_volume_groups:2895] odmget $'-qname = oraarchlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.264)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "oraarchlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.266)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.266)[logredo_volume_groups:2898] odmget -q 'name = oraarchlv and attribute = label' CuAt +epprd_rg:process_resources(5.270)[logredo_volume_groups:2898] [[ -n /oracle/EPP/oraarch ]] +epprd_rg:process_resources(5.273)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.273)[logredo_volume_groups:2900] grep -wp /dev/oraarchlv /etc/filesystems +epprd_rg:process_resources(5.278)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.278)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.278)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/oraarchlv ]] +epprd_rg:process_resources(5.278)[logredo_volume_groups:2895] odmget $'-qname = sapdata1lv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.281)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapdata1lv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.283)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.283)[logredo_volume_groups:2898] odmget -q 'name = sapdata1lv and attribute = label' CuAt +epprd_rg:process_resources(5.288)[logredo_volume_groups:2898] [[ -n /oracle/EPP/sapdata1 ]] +epprd_rg:process_resources(5.290)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.290)[logredo_volume_groups:2900] grep -wp /dev/sapdata1lv /etc/filesystems +epprd_rg:process_resources(5.295)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.295)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.295)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapdata1lv ]] 
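For every jfs2 LV the second loop resolves which log device its file system uses: the label (mount point) is read from the ODM, and the log attribute is pulled out of the LV's stanza in /etc/filesystems with AIX paragraph grep. A log value of INLINE, or one naming the LV's own device, marks an inline log, in which case the data LV itself must be added to the logredo list. A sketch for a single LV, with the names as in the trace:

    LV=saplv                                         # example jfs2 LV
    LOG=$(grep -wp /dev/$LV /etc/filesystems | awk '$1 ~ /log/ {printf $3}')
    if [[ $LOG == INLINE || $LOG == /dev/$LV ]]; then
        logdevs="$logdevs /dev/$LV"                  # inline log: redo the LV itself
    else
        print "/dev/$LV logs to $LOG"                # external log, e.g. /dev/epprdaloglv
    fi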
+epprd_rg:process_resources(5.295)[logredo_volume_groups:2895] odmget $'-qname = sapdata2lv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.298)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapdata2lv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.300)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.301)[logredo_volume_groups:2898] odmget -q 'name = sapdata2lv and attribute = label' CuAt +epprd_rg:process_resources(5.305)[logredo_volume_groups:2898] [[ -n /oracle/EPP/sapdata2 ]] +epprd_rg:process_resources(5.307)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.307)[logredo_volume_groups:2900] grep -wp /dev/sapdata2lv /etc/filesystems +epprd_rg:process_resources(5.312)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.312)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.312)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapdata2lv ]] +epprd_rg:process_resources(5.312)[logredo_volume_groups:2895] odmget $'-qname = sapdata3lv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.316)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapdata3lv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.318)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.318)[logredo_volume_groups:2898] odmget -q 'name = sapdata3lv and attribute = label' CuAt +epprd_rg:process_resources(5.322)[logredo_volume_groups:2898] [[ -n /oracle/EPP/sapdata3 ]] +epprd_rg:process_resources(5.324)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.324)[logredo_volume_groups:2900] grep -wp /dev/sapdata3lv /etc/filesystems +epprd_rg:process_resources(5.329)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.329)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.329)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapdata3lv ]] +epprd_rg:process_resources(5.329)[logredo_volume_groups:2895] odmget $'-qname = sapdata4lv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.333)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "sapdata4lv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.335)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.335)[logredo_volume_groups:2898] odmget -q 'name = sapdata4lv and attribute = label' CuAt +epprd_rg:process_resources(5.339)[logredo_volume_groups:2898] [[ -n /oracle/EPP/sapdata4 ]] +epprd_rg:process_resources(5.341)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.341)[logredo_volume_groups:2900] grep -wp /dev/sapdata4lv /etc/filesystems +epprd_rg:process_resources(5.347)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.347)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.347)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/sapdata4lv ]] +epprd_rg:process_resources(5.347)[logredo_volume_groups:2895] odmget $'-qname = boardlv and 
\t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.350)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "boardlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.353)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.352)[logredo_volume_groups:2898] odmget -q 'name = boardlv and attribute = label' CuAt +epprd_rg:process_resources(5.357)[logredo_volume_groups:2898] [[ -n /board_org ]] +epprd_rg:process_resources(5.359)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.359)[logredo_volume_groups:2900] grep -wp /dev/boardlv /etc/filesystems +epprd_rg:process_resources(5.364)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.364)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.364)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/boardlv ]] +epprd_rg:process_resources(5.364)[logredo_volume_groups:2895] odmget $'-qname = origlogAlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.368)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "origlogAlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.370)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.370)[logredo_volume_groups:2898] odmget -q 'name = origlogAlv and attribute = label' CuAt +epprd_rg:process_resources(5.374)[logredo_volume_groups:2898] [[ -n /oracle/EPP/origlogA ]] +epprd_rg:process_resources(5.376)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.376)[logredo_volume_groups:2900] grep -wp /dev/origlogAlv /etc/filesystems +epprd_rg:process_resources(5.381)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.381)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.381)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/origlogAlv ]] +epprd_rg:process_resources(5.382)[logredo_volume_groups:2895] odmget $'-qname = origlogBlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.385)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "origlogBlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.387)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.387)[logredo_volume_groups:2898] odmget -q 'name = origlogBlv and attribute = label' CuAt +epprd_rg:process_resources(5.391)[logredo_volume_groups:2898] [[ -n /oracle/EPP/origlogB ]] +epprd_rg:process_resources(5.393)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.393)[logredo_volume_groups:2900] grep -wp /dev/origlogBlv /etc/filesystems +epprd_rg:process_resources(5.399)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.399)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.399)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/origlogBlv ]] +epprd_rg:process_resources(5.399)[logredo_volume_groups:2895] odmget $'-qname = mirrlogAlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.402)[logredo_volume_groups:2895] [[ 
-n $'\nCuAt:\n\tname = "mirrlogAlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.404)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.404)[logredo_volume_groups:2898] odmget -q 'name = mirrlogAlv and attribute = label' CuAt +epprd_rg:process_resources(5.409)[logredo_volume_groups:2898] [[ -n /oracle/EPP/mirrlogA ]] +epprd_rg:process_resources(5.411)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.411)[logredo_volume_groups:2900] grep -wp /dev/mirrlogAlv /etc/filesystems +epprd_rg:process_resources(5.416)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.416)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.416)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/mirrlogAlv ]] +epprd_rg:process_resources(5.416)[logredo_volume_groups:2895] odmget $'-qname = mirrlogBlv and \t\t attribute = type and \t\t value = jfs2' CuAt +epprd_rg:process_resources(5.420)[logredo_volume_groups:2895] [[ -n $'\nCuAt:\n\tname = "mirrlogBlv"\n\tattribute = "type"\n\tvalue = "jfs2"\n\ttype = "R"\n\tgeneric = "DU"\n\trep = "s"\n\tnls_index = 639' ]] +epprd_rg:process_resources(5.422)[logredo_volume_groups:2898] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:process_resources(5.422)[logredo_volume_groups:2898] odmget -q 'name = mirrlogBlv and attribute = label' CuAt +epprd_rg:process_resources(5.426)[logredo_volume_groups:2898] [[ -n /oracle/EPP/mirrlogB ]] +epprd_rg:process_resources(5.428)[logredo_volume_groups:2900] awk '$1 ~ /log/ {printf $3}' +epprd_rg:process_resources(5.428)[logredo_volume_groups:2900] grep -wp /dev/mirrlogBlv /etc/filesystems +epprd_rg:process_resources(5.433)[logredo_volume_groups:2900] LOG=/dev/epprdaloglv +epprd_rg:process_resources(5.433)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == INLINE ]] +epprd_rg:process_resources(5.433)[logredo_volume_groups:2901] [[ /dev/epprdaloglv == /dev/mirrlogBlv ]] +epprd_rg:process_resources(5.433)[logredo_volume_groups:2910] : Remove any duplicates acquired so far +epprd_rg:process_resources(5.436)[logredo_volume_groups:2912] echo /dev/epprdaloglv +epprd_rg:process_resources(5.436)[logredo_volume_groups:2912] sort -u +epprd_rg:process_resources(5.436)[logredo_volume_groups:2912] tr ' ' '\n' +epprd_rg:process_resources(5.442)[logredo_volume_groups:2912] logdevs=/dev/epprdaloglv +epprd_rg:process_resources(5.442)[logredo_volume_groups:2915] : Run logredos in parallel to save time. +epprd_rg:process_resources(5.442)[logredo_volume_groups:2919] [[ -n '' ]] +epprd_rg:process_resources(5.442)[logredo_volume_groups:2944] : Run logredo only if the LV is closed. +epprd_rg:process_resources(5.442)[logredo_volume_groups:2946] awk '$1 ~ /^epprdaloglv$/ && $6 ~ /closed\// {print "CLOSED"}' /var/hacmp/log/.process_resources_logredo.19923274 +epprd_rg:process_resources(5.447)[logredo_volume_groups:2946] [[ -n CLOSED ]] +epprd_rg:process_resources(5.447)[logredo_volume_groups:2949] : Run logredo only if filesystem is not mounted on any of the node in the cluster. 
+epprd_rg:process_resources(5.447)[logredo_volume_groups:2951] [[ -z '' ]] +epprd_rg:process_resources(5.448)[logredo_volume_groups:2958] rm -f /var/hacmp/log/.process_resources_logredo.19923274 +epprd_rg:process_resources(5.448)[logredo_volume_groups:2953] logredo /dev/epprdaloglv +epprd_rg:process_resources(5.451)[logredo_volume_groups:2962] : Wait for the background logredos from the RGs +epprd_rg:process_resources(5.451)[logredo_volume_groups:2964] wait J2_LOGREDO:log redo processing for /dev/epprdaloglv +epprd_rg:process_resources(5.481)[logredo_volume_groups:2966] return 0 +epprd_rg:process_resources(5.481)[3324] true +epprd_rg:process_resources(5.481)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(5.481)[3328] set -a +epprd_rg:process_resources(5.481)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-09-30T03:25:20.317768 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(5.500)[3329] eval JOB_TYPE=FILESYSTEMS ACTION=ACQUIRE FILE_SYSTEMS='"/board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap"' RESOURCE_GROUPS='"epprd_rg' '"' FSCHECK_TOOLS='"fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck"' RECOVERY_METHODS='"sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential"' +epprd_rg:process_resources(5.501)[1] JOB_TYPE=FILESYSTEMS +epprd_rg:process_resources(5.501)[1] ACTION=ACQUIRE +epprd_rg:process_resources(5.501)[1] FILE_SYSTEMS=/board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap +epprd_rg:process_resources(5.501)[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources(5.501)[1] FSCHECK_TOOLS=fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck +epprd_rg:process_resources(5.501)[1] RECOVERY_METHODS=sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:process_resources(5.501)[3330] RC=0 +epprd_rg:process_resources(5.501)[3331] set +a +epprd_rg:process_resources(5.501)[3333] (( 0 != 0 )) +epprd_rg:process_resources(5.501)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(5.501)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(5.501)[3343] export GROUPNAME +epprd_rg:process_resources(5.501)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(5.501)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(5.501)[3360] [[ FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources(5.501)[3360] [[ FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources(5.501)[3482] process_file_systems ACQUIRE +epprd_rg:process_resources(5.501)[process_file_systems:2640] PS4_FUNC=process_file_systems +epprd_rg:process_resources(5.501)[process_file_systems:2640] typeset PS4_FUNC +epprd_rg:process_resources(5.501)[process_file_systems:2641] [[ high == high ]] +epprd_rg:process_resources(5.501)[process_file_systems:2641] set -x 
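
The clRGPA exchange just traced is the engine of process_resources: each pass asks the resource group policy agent for the next job, evals the NAME=VALUE pairs it prints under set -a so they are exported to whatever handler runs next, and dispatches on JOB_TYPE; here the answer was JOB_TYPE=FILESYSTEMS ACTION=ACQUIRE plus the per-group lists. A simplified sketch of the pattern (loop-exit handling and the other job types are elided; this is not the script source):

    while :
    do
        set -a                  # auto-export everything the eval assigns
        eval $(clRGPA)          # e.g. JOB_TYPE=FILESYSTEMS ACTION=ACQUIRE ...
        set +a
        case $JOB_TYPE in
            FILESYSTEMS) process_file_systems $ACTION ;;
            *)           break ;;    # remaining job types elided
        esac
    done

Note that the comma-separated FILE_SYSTEMS, FSCHECK_TOOLS and RECOVERY_METHODS lists are positional: the Nth tool and method belong to the Nth filesystem, which is why cl_activate_fs below can take just the first entry of each.
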
+epprd_rg:process_resources(5.501)[process_file_systems:2643] STAT=0 +epprd_rg:process_resources(5.501)[process_file_systems:2645] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources(5.501)[process_file_systems:2647] cl_activate_fs +epprd_rg:cl_activate_fs[819] version=1.1.8.5 +epprd_rg:cl_activate_fs[823] : Check for mounting OEM file systems +epprd_rg:cl_activate_fs[825] OEM_FS=false +epprd_rg:cl_activate_fs[826] (( 0 != 0 )) +epprd_rg:cl_activate_fs[832] STATUS=0 +epprd_rg:cl_activate_fs[832] typeset -li STATUS +epprd_rg:cl_activate_fs[833] EMULATE=REAL +epprd_rg:cl_activate_fs[836] : The environment variable MOUNT_WLMCNTRL_SELFMANAGE is referenced inside mount. +epprd_rg:cl_activate_fs[837] : If this variable is set, a few calls to wlmcntrl are skipped inside mount, which +epprd_rg:cl_activate_fs[838] : offers performance benefits. Hence we will export this variable if it is set +epprd_rg:cl_activate_fs[839] : in /etc/environment. +epprd_rg:cl_activate_fs[841] grep -w ^MOUNT_WLMCNTRL_SELFMANAGE /etc/environment +epprd_rg:cl_activate_fs[841] export eval +epprd_rg:cl_activate_fs[843] [[ -n FILESYSTEMS ]] +epprd_rg:cl_activate_fs[843] [[ FILESYSTEMS != GROUP ]] +epprd_rg:cl_activate_fs[846] : If JOB_TYPE is set, and it is not equal to GROUP, then +epprd_rg:cl_activate_fs[847] : we are processing for process_resources, which passes requests +epprd_rg:cl_activate_fs[848] : associated with multiple resource groups through environment variables +epprd_rg:cl_activate_fs[850] activate_fs_process_resources +epprd_rg:cl_activate_fs[activate_fs_process_resources:716] [[ high == high ]] +epprd_rg:cl_activate_fs[activate_fs_process_resources:716] set -x +epprd_rg:cl_activate_fs[activate_fs_process_resources:718] ERRSTATUS=0 +epprd_rg:cl_activate_fs[activate_fs_process_resources:718] typeset -i ERRSTATUS +epprd_rg:cl_activate_fs[activate_fs_process_resources:719] RC=0 +epprd_rg:cl_activate_fs[activate_fs_process_resources:719] typeset -li RC +epprd_rg:cl_activate_fs[activate_fs_process_resources:742] export GROUPNAME +epprd_rg:cl_activate_fs[activate_fs_process_resources:745] : Get the file systems, recovery tool and procedure for this +epprd_rg:cl_activate_fs[activate_fs_process_resources:746] : resource group +epprd_rg:cl_activate_fs[activate_fs_process_resources:748] print /board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap +epprd_rg:cl_activate_fs[activate_fs_process_resources:748] read _RG_FILE_SYSTEMS FILE_SYSTEMS +epprd_rg:cl_activate_fs[activate_fs_process_resources:748] IFS=: +epprd_rg:cl_activate_fs[activate_fs_process_resources:749] print fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck +epprd_rg:cl_activate_fs[activate_fs_process_resources:749] read _RG_FSCHECK_TOOLS FSCHECK_TOOLS +epprd_rg:cl_activate_fs[activate_fs_process_resources:749] IFS=: +epprd_rg:cl_activate_fs[activate_fs_process_resources:750] print sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:cl_activate_fs[activate_fs_process_resources:750] read _RG_RECOVERY_METHODS RECOVERY_METHODS +epprd_rg:cl_activate_fs[activate_fs_process_resources:750] IFS=: +epprd_rg:cl_activate_fs[activate_fs_process_resources:753] : Since all file systems in a resource group use the same recovery
+epprd_rg:cl_activate_fs[activate_fs_process_resources:754] : method and recovery means, just pick up the first one in the list +epprd_rg:cl_activate_fs[activate_fs_process_resources:756] print fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck,fsck +epprd_rg:cl_activate_fs[activate_fs_process_resources:756] read FSCHECK_TOOL rest +epprd_rg:cl_activate_fs[activate_fs_process_resources:756] IFS=, +epprd_rg:cl_activate_fs[activate_fs_process_resources:757] print sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential,sequential +epprd_rg:cl_activate_fs[activate_fs_process_resources:757] read RECOVERY_METHOD rest +epprd_rg:cl_activate_fs[activate_fs_process_resources:757] IFS=, +epprd_rg:cl_activate_fs[activate_fs_process_resources:760] : If there are any unmounted file systems for this resource group, go +epprd_rg:cl_activate_fs[activate_fs_process_resources:761] : recover and mount them. +epprd_rg:cl_activate_fs[activate_fs_process_resources:763] [[ -n /board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap ]] +epprd_rg:cl_activate_fs[activate_fs_process_resources:765] IFS=, +epprd_rg:cl_activate_fs[activate_fs_process_resources:765] set -- /board_org,/oracle,/oracle/EPP,/oracle/EPP/mirrlogA,/oracle/EPP/mirrlogB,/oracle/EPP/oraarch,/oracle/EPP/origlogA,/oracle/EPP/origlogB,/oracle/EPP/sapdata1,/oracle/EPP/sapdata2,/oracle/EPP/sapdata3,/oracle/EPP/sapdata4,/sapmnt,/usr/sap +epprd_rg:cl_activate_fs[activate_fs_process_resources:765] print /board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap +epprd_rg:cl_activate_fs[activate_fs_process_resources:765] RG_FILE_SYSTEMS='/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[activate_fs_process_resources:766] activate_fs_process_group sequential fsck '/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[activate_fs_process_group:362] PS4_LOOP='' +epprd_rg:cl_activate_fs[activate_fs_process_group:362] typeset PS4_LOOP +epprd_rg:cl_activate_fs[activate_fs_process_group:363] [[ high == high ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:363] set -x +epprd_rg:cl_activate_fs[activate_fs_process_group:365] typeset RECOVERY_METHOD FSCHECK_TOOL FILESYSTEMS +epprd_rg:cl_activate_fs[activate_fs_process_group:366] STATUS=0 +epprd_rg:cl_activate_fs[activate_fs_process_group:366] typeset -i STATUS +epprd_rg:cl_activate_fs[activate_fs_process_group:368] RECOVERY_METHOD=sequential +epprd_rg:cl_activate_fs[activate_fs_process_group:369] FSCHECK_TOOL=fsck +epprd_rg:cl_activate_fs[activate_fs_process_group:370] shift 2 +epprd_rg:cl_activate_fs[activate_fs_process_group:371] FILESYSTEMS='/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch 
/oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[activate_fs_process_group:372] comm_failure='' +epprd_rg:cl_activate_fs[activate_fs_process_group:372] typeset comm_failure +epprd_rg:cl_activate_fs[activate_fs_process_group:373] rc_mount='' +epprd_rg:cl_activate_fs[activate_fs_process_group:373] typeset rc_mount +epprd_rg:cl_activate_fs[activate_fs_process_group:376] : Filter out duplicates, and file systems which are already mounted +epprd_rg:cl_activate_fs[activate_fs_process_group:378] mounts_to_do '/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[mounts_to_do:283] tomount='/board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap' +epprd_rg:cl_activate_fs[mounts_to_do:283] typeset tomount +epprd_rg:cl_activate_fs[mounts_to_do:286] : Get most current list of mounted filesystems +epprd_rg:cl_activate_fs[mounts_to_do:288] mount +epprd_rg:cl_activate_fs[mounts_to_do:288] 2> /dev/null +epprd_rg:cl_activate_fs[mounts_to_do:288] paste -s - +epprd_rg:cl_activate_fs[mounts_to_do:288] awk '$3 ~ /jfs2*$/ {print $2}' +epprd_rg:cl_activate_fs[mounts_to_do:288] mounted=$'/\t/usr\t/var\t/tmp\t/home\t/admin\t/opt\t/var/adm/ras/livedump\t/ptf' +epprd_rg:cl_activate_fs[mounts_to_do:288] typeset mounted +epprd_rg:cl_activate_fs[mounts_to_do:291] shift +epprd_rg:cl_activate_fs[mounts_to_do:294] typeset -A mountedArray tomountArray +epprd_rg:cl_activate_fs[mounts_to_do:295] typeset fs +epprd_rg:cl_activate_fs[mounts_to_do:298] : Create an associative array for each list, which +epprd_rg:cl_activate_fs[mounts_to_do:299] : has the side effect of dropping any duplicates +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/usr]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/var]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/tmp]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/home]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/admin]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/opt]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/var/adm/ras/livedump]=1 +epprd_rg:cl_activate_fs[mounts_to_do:302] mountedArray[/ptf]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/board_org]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/mirrlogA]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/mirrlogB]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/oraarch]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/origlogA]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/origlogB]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/sapdata1]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/sapdata2]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/sapdata3]=1 
+epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/oracle/EPP/sapdata4]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/sapmnt]=1 +epprd_rg:cl_activate_fs[mounts_to_do:306] tomountArray[/usr/sap]=1 +epprd_rg:cl_activate_fs[mounts_to_do:310] mounted='' +epprd_rg:cl_activate_fs[mounts_to_do:311] tomount='' +epprd_rg:cl_activate_fs[mounts_to_do:314] : expand fs from all tomountArray subscript names +epprd_rg:cl_activate_fs[mounts_to_do:316] set +u +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:319] [[ '' == 1 ]] +epprd_rg:cl_activate_fs[mounts_to_do:329] : Print all subscript names which are all remaining mount +epprd_rg:cl_activate_fs[mounts_to_do:330] : points which have to be mounted +epprd_rg:cl_activate_fs[mounts_to_do:332] print /board_org /oracle /oracle/EPP /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt /usr/sap +epprd_rg:cl_activate_fs[mounts_to_do:332] sort -u +epprd_rg:cl_activate_fs[mounts_to_do:332] tr ' ' '\n' +epprd_rg:cl_activate_fs[mounts_to_do:334] set -u +epprd_rg:cl_activate_fs[activate_fs_process_group:378] FILESYSTEMS=$'/board_org\n/oracle\n/oracle/EPP\n/oracle/EPP/mirrlogA\n/oracle/EPP/mirrlogB\n/oracle/EPP/oraarch\n/oracle/EPP/origlogA\n/oracle/EPP/origlogB\n/oracle/EPP/sapdata1\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata4\n/sapmnt\n/usr/sap' +epprd_rg:cl_activate_fs[activate_fs_process_group:379] [[ -z $'/board_org\n/oracle\n/oracle/EPP\n/oracle/EPP/mirrlogA\n/oracle/EPP/mirrlogB\n/oracle/EPP/oraarch\n/oracle/EPP/origlogA\n/oracle/EPP/origlogB\n/oracle/EPP/sapdata1\n/oracle/EPP/sapdata2\n/oracle/EPP/sapdata3\n/oracle/EPP/sapdata4\n/sapmnt\n/usr/sap' ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:385] : Get unique temporary file names by using the resource group and the +epprd_rg:cl_activate_fs[activate_fs_process_group:386] : current process ID +epprd_rg:cl_activate_fs[activate_fs_process_group:388] [[ -z epprd_rg ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:397] TMP_FILENAME=epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs[activate_fs_process_group:398] rm -f /tmp/epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs[activate_fs_process_group:401] : If FSCHECK_TOOL is null get from ODM +epprd_rg:cl_activate_fs[activate_fs_process_group:403] [[ -z fsck ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:408] print fsck +epprd_rg:cl_activate_fs[activate_fs_process_group:408] FSCHECK_TOOL=fsck +epprd_rg:cl_activate_fs[activate_fs_process_group:409] [[ fsck != fsck ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:416] : If RECOVERY_METHOD is null get from ODM +epprd_rg:cl_activate_fs[activate_fs_process_group:418] [[ -z sequential ]] 
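
Stepping back to the mounts_to_do call completed above: it filters the requested list against what is already mounted, using two associative arrays keyed by mount point, so duplicates disappear as a side effect of subscript assignment; the fourteen [[ '' == 1 ]] tests are each requested path being looked up in the mounted array and found absent. The idea in ksh93, as a sketch (names illustrative, not the script source):

    typeset -A mountedArray tomountArray
    for fs in $(mount 2>/dev/null | awk '$3 ~ /jfs2*$/ {print $2}')
    do
        mountedArray[$fs]=1              # what is mounted right now
    done
    tomount='/board_org /oracle /usr/sap'    # illustrative subset of the RG list
    for fs in $tomount
    do
        tomountArray[$fs]=1              # assignment de-duplicates for free
    done
    for fs in "${!tomountArray[@]}"      # ksh93: expand subscript names
    do
        [[ ${mountedArray[$fs]-} == 1 ]] && unset "tomountArray[$fs]"
    done
    print -- "${!tomountArray[@]}" | tr ' ' '\n' | sort -u

Here nothing in the group is mounted yet, so all fourteen mount points survive into FILESYSTEMS.
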
+epprd_rg:cl_activate_fs[activate_fs_process_group:423] print sequential +epprd_rg:cl_activate_fs[activate_fs_process_group:423] RECOVERY_METHOD=sequential +epprd_rg:cl_activate_fs[activate_fs_process_group:424] [[ sequential != sequential ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:431] set -u +epprd_rg:cl_activate_fs[activate_fs_process_group:434] : If FSCHECK_TOOL is set to logredo, the logredo for each jfslog has +epprd_rg:cl_activate_fs[activate_fs_process_group:435] : already been done in get_disk_vg_fs, so we only need to do fsck check +epprd_rg:cl_activate_fs[activate_fs_process_group:436] : and recovery here before going on to do the mounts +epprd_rg:cl_activate_fs[activate_fs_process_group:438] [[ fsck == fsck ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:441] TOOL='/usr/sbin/fsck -f -p -o nologredo' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:445] PS4_LOOP=/board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:446] lsfs /board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:446] grep -w /board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:449] : Verify if any of the file system /board_org is already mounted anywhere +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] lsfs -qc /board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
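
Before fsck, each filesystem gets a concurrent-mount check: lsfs -qc reports whether MountGuard is enabled, and the fsdb superblock dump that follows is searched for FM_MOUNT, which would mean some node still has the filesystem marked mounted; every superblock in this trace shows FM_CLEAN instead. A sketch of the check (the commands are those traced; the surrounding failure handling is illustrative):

    MOUNTGUARD=$(LC_ALL=C lsfs -qc /board_org | tr : '\n' |
                 grep -w MountGuard | cut -d' ' -f2)
    # fsdb reads subcommands from stdin: su dumps the superblock, q quits
    FMMOUNT=$(printf 'su\nq\n' | fsdb /board_org | grep -w FM_MOUNT | awk '{print $1}')
    if [[ $MOUNTGUARD == yes && -n $FMMOUNT ]]
    then
        print "ERROR: /board_org appears mounted elsewhere; not recovering it"
        exit 1    # illustrative: refuse to fsck/mount a live filesystem
    fi

One detail worth flagging when reading this trace: the cut stage captures MOUNTGUARD as 'yes)' with a trailing parenthesis from the lsfs output, so the [[ 'yes)' == yes ]] comparison below never matches and the MountGuard branch is skipped; the decision to proceed rests on the empty FM_MOUNT result alone.
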
+epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:463] fsdb /board_org +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/board_org\n\nFile System Size:\t\t10485032\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t16384\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000009ffd28\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t91\n[10] s_agsize:\t\t0x00004000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0013ffa5\n \t\t s_fsckpxd.address:\t1310629\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'boardl\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5832\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000000b5\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t181\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe04\t[52] last unmounted:\t0x65152392\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/board_org\n\nFile System Size:\t\t10485032\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t16384\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000009ffd28\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t91\n[10] s_agsize:\t\t0x00004000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0013ffa5\n \t\t s_fsckpxd.address:\t1310629\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'boardl\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5832\t[33] 
s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000000b5\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t181\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe04\t[52] last unmounted:\t0x65152392\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/board_org[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/boardlv The current volume is: /dev/boardlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:445] PS4_LOOP=/oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:446] lsfs /oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:446] grep -w /oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:449] : Verify if any of the file system /oracle is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] lsfs -qc /oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
+epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:463] fsdb /oracle +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle\n\nFile System Size:\t\t41941352\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t65536\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000027ff968\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t211\n[10] s_agsize:\t\t0x00010000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x004fff2d\n \t\t s_fsckpxd.address:\t5242669\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'oracle\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5819\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000295\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t661\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe05\t[52] last unmounted:\t0x65152392\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle\n\nFile System Size:\t\t41941352\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t65536\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000027ff968\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t211\n[10] s_agsize:\t\t0x00010000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x004fff2d\n \t\t s_fsckpxd.address:\t5242669\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'oracle\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5819\t[33] s_time.tj_nsec:\t0x00000000\n[14] 
s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000295\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t661\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe05\t[52] last unmounted:\t0x65152392\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/oraclelv The current volume is: /dev/oraclelv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:446] lsfs /oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:446] grep -w /oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] lsfs -qc /oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:463] fsdb /oracle/EPP +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP\n\nFile System Size:\t\t62912232\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t65536\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x0000000003bff6e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t291\n[10] s_agsize:\t\t0x00010000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0077fedd\n \t\t s_fsckpxd.address:\t7864029\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'epplv\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5824\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000003d5\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t981\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe05\t[52] last unmounted:\t0x65152391\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP\n\nFile System Size:\t\t62912232\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t65536\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x0000000003bff6e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t291\n[10] s_agsize:\t\t0x00010000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0077fedd\n \t\t s_fsckpxd.address:\t7864029\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] 
s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'epplv\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5824\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000003d5\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t981\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe05\t[52] last unmounted:\t0x65152391\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/epplv The current volume is: /dev/epplv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:446] lsfs /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:446] grep -w /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/mirrlogA is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
+epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] lsfs -qc /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:463] fsdb /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/mirrlogA\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'mirrlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5834\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe05\t[52] last unmounted:\t0x65152390\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/mirrlogA\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate 
blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'mirrlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5834\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe05\t[52] last unmounted:\t0x65152390\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogA[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/mirrlogAlv The current volume is: /dev/mirrlogAlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:446] lsfs /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:446] grep -w /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/mirrlogB is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:450] : else in the cluster. 
If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] lsfs -qc /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:463] fsdb /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/mirrlogB\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'mirrlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5835\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe05\t[52] last unmounted:\t0x65152390\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] 
unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/mirrlogB\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'mirrlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5835\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe05\t[52] last unmounted:\t0x65152390\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/mirrlogB[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/mirrlogBlv The current volume is: /dev/mirrlogBlv Primary superblock is valid. 
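Both cycles above end with MOUNTGUARD='yes)': the trailing parenthesis is carried over from the parenthesized summary that lsfs -q appends to its output, so the later test [[ 'yes)' == yes ]] at activate_fs_process_group:469 evaluates false and the MountGuard branch is skipped. A minimal standalone sketch of the same probe, assuming an AIX node; the mount point is taken from this log, and the tolerant pattern match at the end is an illustration, not part of the script:

# Sketch of the MountGuard probe from activate_fs_process_group:457 (AIX ksh).
FS=/oracle/EPP/mirrlogA    # any JFS2 mount point from this log

# Split the colon-separated 'lsfs -qc' record into one field per line,
# keep the MountGuard field, and take its second space-separated word.
MOUNTGUARD=$(LC_ALL=C lsfs -qc "$FS" | tr : '\n' | grep -w MountGuard | cut '-d ' -f2)

# Observed value in this trace: 'yes)' -- note the trailing ')'.
[[ "$MOUNTGUARD" == yes ]]  && echo "exact match"          # false in this log
[[ "$MOUNTGUARD" == yes* ]] && echo "MountGuard enabled"   # tolerant match, for illustration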
+epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:446] lsfs /oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:446] grep -w /oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/oraarch is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] lsfs -qc /oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
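The two comments above describe the fsdb probe that follows: the superblock is dumped with the su subcommand and the s_state decode is searched for FM_MOUNT, which per the script's own comment is set while the file system is mounted on some node. Every dump in this log shows FM_CLEAN instead, so FMMOUNT stays empty. A minimal sketch of the same probe, assuming an AIX node with a JFS2 file system; the error message is illustrative:

# Sketch of the fsdb superblock probe from activate_fs_process_group:463-473.
FS=/oracle/EPP/oraarch     # any JFS2 mount point from this log

# Feed 'su' (show superblock) and 'q' (quit) to fsdb on stdin, exactly
# as the traced here-document does.
FMMOUNT_OUT=$(fsdb "$FS" <<EOF
su
q
EOF
)

# FM_MOUNT in the s_state decode means some node may still have the
# file system mounted; FM_CLEAN means it was unmounted cleanly.
FMMOUNT=$(echo "$FMMOUNT_OUT" | grep -w FM_MOUNT | awk '{ print $1 }')
if [[ -n "$FMMOUNT" ]]
then
    echo "$FS looks mounted elsewhere in the cluster -- not mounting" >&2
fi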
+epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:463] fsdb /oracle/EPP/oraarch +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/oraarch\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'oraarc\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d582e\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe05\t[52] last unmounted:\t0x6515238f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/oraarch\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'oraarc\'\n FM_CLEAN \t[32] 
s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d582e\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe05\t[52] last unmounted:\t0x6515238f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/oraarch[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/oraarchlv The current volume is: /dev/oraarchlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:446] lsfs /oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:446] grep -w /oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/origlogA is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
+epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] lsfs -qc /oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:463] fsdb /oracle/EPP/origlogA +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/origlogA\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'origlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5832\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe05\t[52] last unmounted:\t0x6515238f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/origlogA\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate 
blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'origlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5832\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe05\t[52] last unmounted:\t0x6515238f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogA[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/origlogAlv The current volume is: /dev/origlogAlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:446] lsfs /oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:446] grep -w /oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/origlogB is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:450] : else in the cluster. 
If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] lsfs -qc /oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:463] fsdb /oracle/EPP/origlogB +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/origlogB\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'origlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5833\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe05\t[52] last unmounted:\t0x6515238f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] 
unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/origlogB\n\nFile System Size:\t\t10482792\t(512 byte blocks)\nAggregate Block Size:\t\t512\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t400\n[3] s_size:\t0x00000000009ff468\t[20] s_bsize:\t\t512\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t9\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t0\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t2968\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x009ff468\n \t\t s_fsckpxd.address:\t10482792\n \t\t[28] s_ait.len:\t\t32\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x00000058\n J2_MOUNTGUARD \t\t s_ait.address:\t88\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'origlo\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5833\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t32\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x000028b0\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t10416\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe05\t[52] last unmounted:\t0x6515238f\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/origlogB[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/origlogBlv The current volume is: /dev/origlogBlv Primary superblock is valid. 
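Each cycle then funnels into the same three tests before fsck runs. A condensed sketch of that control flow at activate_fs_process_group:469-508, seeded with the values observed throughout this log; the variable names and the exit on a busy file system are hypothetical, shown only to make each guard's purpose concrete:

# Condensed decision logic of activate_fs_process_group:469-508 (sketch).
MOUNTGUARD='yes)'       # from the lsfs -qc parse; trailing ')' kept
FMMOUNT=''              # superblock showed FM_CLEAN, not FM_MOUNT
FS_MOUNT_MODE=sequential

if [[ "$MOUNTGUARD" == yes ]]; then    # false: 'yes)' != 'yes'
    : # MountGuard-specific handling would run here
fi

if [[ -n "$FMMOUNT" ]]; then           # false: nothing matched FM_MOUNT
    echo "file system appears mounted on another node" >&2
    exit 1                             # hypothetical; the trace never reaches this
fi

if [[ "$FS_MOUNT_MODE" == parallel ]]; then   # false in this cluster
    : # the parallel path would background the fsck/mount
fi

# Only the sequential path remains: fsck the logical volume, then mount it.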
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:446] lsfs /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:446] grep -w /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/sapdata1 is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] lsfs -qc /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:463] fsdb /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/sapdata1\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d582f\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe05\t[52] last unmounted:\t0x6515238e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/sapdata1\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] 
s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d582f\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe05\t[52] last unmounted:\t0x6515238e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata1[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapdata1lv The current volume is: /dev/sapdata1lv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:446] lsfs /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:446] grep -w /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/sapdata2 is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. 
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] lsfs -qc /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:463] fsdb /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/sapdata2\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5830\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe06\t[52] last unmounted:\t0x6515238e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/sapdata2\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate 
blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5830\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe06\t[52] last unmounted:\t0x6515238e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata2[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapdata2lv The current volume is: /dev/sapdata2lv Primary superblock is valid. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:446] lsfs /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:446] grep -w /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/sapdata3 is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:450] : else in the cluster. 
If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] lsfs -qc /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:463] fsdb /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/sapdata3\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5831\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe06\t[52] last unmounted:\t0x6515238e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] 
unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/sapdata3\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5831\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe06\t[52] last unmounted:\t0x6515238e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata3[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapdata3lv The current volume is: /dev/sapdata3lv Primary superblock is valid. 
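The cycle repeats verbatim for each remaining file system in the resource group. Per the AIX fsck documentation, -f requests a fast check and -p fixes minor problems without prompting, while -o nologredo skips the JFS2 log-replay step; each check here therefore returns only "Primary superblock is valid." A compact, hypothetical condensation of the outer loop, with the list hard-coded from this log (the real script derives it from the resource group definition):

# Hypothetical condensation of the per-file-system loop traced above (ksh).
set -- /oracle/EPP/mirrlogA /oracle/EPP/mirrlogB /oracle/EPP/oraarch \
       /oracle/EPP/origlogA /oracle/EPP/origlogB /oracle/EPP/sapdata1 \
       /oracle/EPP/sapdata2 /oracle/EPP/sapdata3 /oracle/EPP/sapdata4 /sapmnt

for fs in "$@"
do
    # ksh runs the last pipeline stage in the current shell, so DEV
    # (the /dev/...lv device from lsfs) survives the read.
    lsfs "$fs" | grep -w "$fs" | read DEV rest
    # ... MountGuard and FM_MOUNT probes as sketched earlier ...
    /usr/sbin/fsck -f -p -o nologredo "$DEV"   # same flags as the trace
done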
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:445] PS4_LOOP=/oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:446] lsfs /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:446] grep -w /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:449] : Verify if any of the file system /oracle/EPP/sapdata4 is already mounted anywhere +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] lsfs -qc /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. 
+epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:463] fsdb /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/oracle/EPP/sapdata4\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5831\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe06\t[52] last unmounted:\t0x6515238e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/oracle/EPP/sapdata4\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapdat\'\n FM_CLEAN \t[32] 
s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5831\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe06\t[52] last unmounted:\t0x6515238e\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/oracle/EPP/sapdata4[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapdata4lv The current volume is: /dev/sapdata4lv Primary superblock is valid. +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:445] PS4_LOOP=/sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:446] lsfs /sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:446] grep -w /sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:449] : Verify if any of the file system /sapmnt is already mounted anywhere +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] lsfs -qc /sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. 
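The FM_MOUNT probe for /oracle/EPP/sapdata4 above drives fsdb non-interactively through a here-document: 'su' dumps the superblock, 'q' quits, and the dump is grepped for FM_MOUNT. An empty result, as seen here (the superblock carries FM_CLEAN instead), means no node has the file system recorded as mounted, so activation can proceed. A minimal standalone sketch of the same probe; the temp file name is illustrative:

fsdb /oracle/EPP/sapdata4 <<'EOF' > /tmp/fsdb_sb.$$ 2>&1
su
q
EOF
if grep -qw FM_MOUNT /tmp/fsdb_sb.$$; then
    echo "file system is recorded as mounted elsewhere; do not mount it here"
fi
rm -f /tmp/fsdb_sb.$$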
+epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:463] fsdb /sapmnt +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/sapmnt\n\nFile System Size:\t\t20970472\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t32768\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000013ffbe8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t131\n[10] s_agsize:\t\t0x00008000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0027ff7d\n \t\t s_fsckpxd.address:\t2621309\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'sapmnt\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5818\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000155\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t341\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe06\t[52] last unmounted:\t0x6515238d\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/sapmnt\n\nFile System Size:\t\t20970472\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t32768\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000013ffbe8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t131\n[10] s_agsize:\t\t0x00008000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x0027ff7d\n \t\t s_fsckpxd.address:\t2621309\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] 
s_fpack:\t\t\'sapmnt\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5818\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000155\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t341\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe06\t[52] last unmounted:\t0x6515238d\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/sapmnt[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/sapmntlv The current volume is: /dev/sapmntlv Primary superblock is valid. +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:445] PS4_LOOP=/usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:446] lsfs /usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:446] grep -w /usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:446] read DEV rest +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:449] : Verify if any of the file system /usr/sap is already mounted anywhere +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:450] : else in the cluster. If it is already mounted somewhere else, +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:451] : we dont want to continue here to avoid data corruption. +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:454] : When a filesystem is protected against concurrent mounting, +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:455] : MountGuard flag is set and lsfs command displays characteristics of file systems. +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] lsfs -qc /usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] LC_ALL=C +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] tr : '\n' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] cut '-d ' -f2 +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] grep -w MountGuard +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:457] MOUNTGUARD='yes)' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:460] : fsdb and its subcommands allow us to view the information in a file system. 
+epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:461] : The FM_MOUNT flag is set if the file system is mounted cleanly on any node. +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:463] fsdb /usr/sap +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:463] 0<< \EOF su q EOF +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:463] FMMOUNT_OUT=$'\nFile System:\t\t\t/usr/sap\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'saplv\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5815\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe06\t[52] last unmounted:\t0x6515238d\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:467] echo $'\nFile System:\t\t\t/usr/sap\n\nFile System Size:\t\t104853992\t(512 byte blocks)\nAggregate Block Size:\t\t4096\nAllocation Group Size:\t\t131072\t(aggregate blocks)\n\n> [1] s_magic:\t\t\'J2FS\'\t\t[18] s_fscklog:\t\t1\n[2] s_version:\t\t2\t\t[19] s_fsckloglen:\t50\n[3] s_size:\t0x00000000063ff1e8\t[20] s_bsize:\t\t4096\n[4] s_logdev:\t0x8000003300000001\t[21] s_logserial:\t0x00000006\n[5] s_l2bsize:\t\t12\t\t[22] s_logpxd.len:\t0\n[6] s_l2bfactor:\t3\t\t[23] s_logpxd.addr1:\t0x00\n[7] s_pbsize:\t\t512\t\t[24] s_logpxd.addr2:\t0x00000000\n[8] s_l2pbsize:\t\t9\t\t s_logpxd.address:\t0\n[9] s_devbsize:\t\t512\t\t[25] s_fsckpxd.len:\t451\n[10] s_agsize:\t\t0x00020000\t[26] s_fsckpxd.addr1:\t0x00\n[11] s_flag:\t\t0x02000100\t[27] s_fsckpxd.addr2:\t0x00c7fe3d\n \t\t s_fsckpxd.address:\t13106749\n \t\t[28] s_ait.len:\t\t4\n J2_GROUPCOMMIT \t\t[29] s_ait.addr1:\t0x00\n \t\t[30] s_ait.addr2:\t0x0000000b\n J2_MOUNTGUARD \t\t s_ait.address:\t11\n[12] 
s_state:\t\t0x00000000\t[31] s_fpack:\t\t\'saplv\'\n FM_CLEAN \t[32] s_fname:\t\t\'\'\n[13] s_time.tj_sec: 0x00000000639d5815\t[33] s_time.tj_nsec:\t0x00000000\n[14] s_ait2.len:\t4\t\t[34] s_xfsckpxd.len:\t0\n[15] s_ait2.addr1:\t0x00\t\t[35] s_xfsckpxd.addr1:\t0x00\n[16] s_ait2.addr2:\t0x00000656\t[36] s_xfsckpxd.addr2:\t0x00000000\n s_ait2.address:\t1622\t\t s_xfsckpxd.address:\t0\n[17] s_xsize: 0x0000000000000000\t[37] s_xlogpxd.len:\t0\n[40] feature_compat: 0x0000000000000005 [38] s_xlogpxd.addr1:\t0x00\n[41] feature_rdonly: 0x0000000000000000 [39] s_xlogpxd.addr2:\t0x00000000\n[42] feature_incompat: 0x0000000000000000 s_xlogpxd.address:\t0\n[43-49] <...snapshot info...>\t\t[50] s_maxext:\t0x00000000\n s_state_ts[8]:\n[51] last mounted:\t0x63d4fe06\t[52] last unmounted:\t0x6515238d\n[53] last marked dirty:\t0x00000000\t[54] last recovered:\t0x00000000\n[55] last size change:\t0x00000000\t[56] unused timestamp:\t0x00000000\n[57] unused timestamp:\t0x00000000\t[58] unused timestamp:\t0x00000000\n[59] s_szchng:\t\t0x00000000\t[60] s_origAGSZ:\t0x00000000\n[61] s_origSZ:\t0x0000000000000000\ndisplay_super: [m]odify, [s]napshot info or e[x]it: > ' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:467] grep -w FM_MOUNT +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:467] awk '{ print $1 }' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:467] FMMOUNT='' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:468] fsck_check='' +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:469] [[ 'yes)' == yes ]] +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:473] [[ -n '' ]] +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:503] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:508] /usr/sbin/fsck -f -p -o nologredo /dev/saplv The current volume is: /dev/saplv Primary superblock is valid. 
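With no FM_MOUNT flag found, and the MountGuard comparison short-circuited as noted earlier, each file system falls through to a foreground fsck; the runs are serial because the [[ sequential == parallel ]] tests show the recovery method is sequential. The flags mirror the trace: -f requests a fast check, -p preens (repairs minor problems without prompting), and -o nologredo tells the JFS2 fsck not to replay the journal log. A minimal sketch repeating the same pass over the three logical volumes checked in this excerpt:

for lv in sapdata4lv sapmntlv saplv; do
    /usr/sbin/fsck -f -p -o nologredo /dev/$lv ||
        echo "fsck returned rc=$? on /dev/$lv"
done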
+epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:513] : Allow any backgrounded fsck operations to finish +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:515] wait +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:519] : Now attempt to mount all the file systems +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:521] ALLFS=All_filesystems +epprd_rg:cl_activate_fs:/usr/sap[activate_fs_process_group:522] cl_RMupdate resource_acquiring All_filesystems cl_activate_fs 2023-09-30T03:25:21.117550 2023-09-30T03:25:21.121938 +epprd_rg:cl_activate_fs(0.792):/usr/sap[activate_fs_process_group:524] PS4_TIMER=true +epprd_rg:cl_activate_fs(0.792):/usr/sap[activate_fs_process_group:524] typeset PS4_TIMER +epprd_rg:cl_activate_fs(0.792):/board_org[activate_fs_process_group:527] PS4_LOOP=/board_org +epprd_rg:cl_activate_fs(0.792):/board_org[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(0.792):/board_org[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(0.792):/board_org[activate_fs_process_group:540] fs_mount /board_org fsck epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:69] FS=/board_org +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:81] : Here check to see if the information in /etc/filesystems for /board_org +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:84] : point in /etc/filesystems. 
+epprd_rg:cl_activate_fs(0.792):/board_org[fs_mount:86] lsfs -c /board_org +epprd_rg:cl_activate_fs(0.793):/board_org[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(0.798):/board_org[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(0.793):/board_org[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(0.798):/board_org[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(0.798):/board_org[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(0.799):/board_org[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(0.793):/board_org[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(0.800):/board_org[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(0.801):/board_org[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(0.801):/board_org[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(0.802):/board_org[fs_mount:100] LV_name=boardlv +epprd_rg:cl_activate_fs(0.802):/board_org[fs_mount:101] getlvcb -T -A boardlv +epprd_rg:cl_activate_fs(0.803):/board_org[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(0.821):/board_org[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(0.803):/board_org[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.11 \n\t lvname = boardlv \n\t label = /board_org \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:47 2022\n \t time modified = Sat Jan 28 17:10:40 2023\n ' +epprd_rg:cl_activate_fs(0.821):/board_org[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(0.821):/board_org[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(0.822):/board_org[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(0.803):/board_org[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.11 \n\t lvname = boardlv \n\t label = /board_org \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:47 2022\n \t time modified = Sat Jan 28 17:10:40 2023\n ' +epprd_rg:cl_activate_fs(0.822):/board_org[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(0.823):/board_org[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(0.825):/board_org[fs_mount:115] clodmget -q 'name = boardlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(0.828):/board_org[fs_mount:115] CuAt_label=/board_org +epprd_rg:cl_activate_fs(0.828):/board_org[fs_mount:118] : At this point, if things are working correctly, /board_org from /etc/filesystems +epprd_rg:cl_activate_fs(0.828):/board_org[fs_mount:119] : should match /board_org from CuAt ODM and /board_org from the LVCB +epprd_rg:cl_activate_fs(0.828):/board_org[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(0.828):/board_org[fs_mount:121] : were done in clvaryonvg. 
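Before mounting, fs_mount cross-checks three copies of the mount point for /board_org: the /etc/filesystems stanza (lsfs -c), the on-disk LVCB label (getlvcb -T -A), and the label attribute in the CuAt ODM (clodmget). A condensed sketch of that three-way check, reusing the commands from the trace; the pipe-into-read idiom keeps the variables because AIX ksh runs the last pipeline stage in the current shell:

FS=/board_org                                   # example from this trace
lsfs -c $FS 2>/dev/null | tail -1 | IFS=: read skip LV_dev_name vfs_type rest
LV_name=${LV_dev_name##*/}                      # /dev/boardlv -> boardlv
getlvcb -T -A $LV_name 2>&1 | grep -w 'label =' | read skip skip LVCB_label
CuAt_label=$(clodmget -q "name = $LV_name and attribute = label" -f value -n CuAt)
if [[ $LVCB_label != "$FS" || $CuAt_label != "$FS" ]]; then
    echo "label mismatch on $FS: LVCB=$LVCB_label CuAt=$CuAt_label"
fi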
+epprd_rg:cl_activate_fs(0.828):/board_org[fs_mount:123] [[ /board_org != /board_org ]] +epprd_rg:cl_activate_fs(0.828):/board_org[fs_mount:128] [[ /board_org != /board_org ]] +epprd_rg:cl_activate_fs(0.828):/board_org[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(0.828):/board_org[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(0.828):/board_org[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(0.849):/board_org[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(0.849):/board_org[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(0.849):/board_org[fs_mount:160] amlog_trace '' 'Activating Filesystem|/board_org' +epprd_rg:cl_activate_fs(0.849):/board_org[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(0.850):/board_org[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(0.874):/board_org[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(0.877):/board_org[amlog_trace:319] DATE=2023-09-30T03:25:21.207393 +epprd_rg:cl_activate_fs(0.877):/board_org[amlog_trace:320] echo '|2023-09-30T03:25:21.207393|INFO: Activating Filesystem|/board_org' +epprd_rg:cl_activate_fs(0.877):/board_org[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(0.877):/board_org[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(0.879):/board_org[fs_mount:162] : Try to mount filesystem /board_org at Sep 30 03:25:21.000 +epprd_rg:cl_activate_fs(0.879):/board_org[fs_mount:163] mount /board_org +epprd_rg:cl_activate_fs(0.892):/board_org[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(0.892):/board_org[fs_mount:219] : On successful mount of a JFS2 file system, 
engage mountguard, +epprd_rg:cl_activate_fs(0.892):/board_org[fs_mount:220] : if we are running on an AIX level that supports it +epprd_rg:cl_activate_fs(0.892):/board_org[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/board_org' +epprd_rg:cl_activate_fs(0.892):/board_org[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(0.893):/board_org[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(0.918):/board_org[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(0.920):/board_org[amlog_trace:319] DATE=2023-09-30T03:25:21.251100 +epprd_rg:cl_activate_fs(0.920):/board_org[amlog_trace:320] echo '|2023-09-30T03:25:21.251100|INFO: Activating Filesystems completed|/board_org' +epprd_rg:cl_activate_fs(0.920):/board_org[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(0.921):/board_org[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(0.921):/board_org[fs_mount:226] : Each of the V, R, M and F fields is padded to fixed length, +epprd_rg:cl_activate_fs(0.921):/board_org[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(0.921):/board_org[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(0.921):/board_org[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(0.921):/board_org[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(0.921):/board_org[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(0.921):/board_org[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(0.921):/board_org[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(0.921):/board_org[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(0.922):/board_org[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(0.922):/board_org[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(0.924):/board_org[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(0.924):/board_org[fs_mount:236] IFS=.
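The zero-filled fields above are what turn the dotted level of bos.rte.filesystem into the single integer assigned in the next trace entry: 7.2.5.102 becomes VRMF=702005102, so the AIX-level gate for MountGuard reduces to one arithmetic comparison. A minimal sketch of the same computation; the closing chfs call is an assumption about the branch this trace never takes, since every LVCB here already records mountguard=yes:

typeset -li V R M F
typeset -Z2 R                  # zero-fill release to two digits
typeset -Z3 M F                # zero-fill modification and fix to three digits
typeset -li VRMF
lslpp -lcqOr bos.rte.filesystem | cut -f3 -d: | IFS=. read V R M F
VRMF=$V$R$M$F                  # e.g. 7.2.5.102 -> 702005102
if (( V == 7 && VRMF >= 701001000 )); then
    chfs -a mountguard=yes /board_org    # assumed action of the untaken branch
fi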
+epprd_rg:cl_activate_fs(0.924):/board_org[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(0.924):/board_org[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(0.924):/board_org[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(0.924):/board_org[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(0.924):/board_org[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(0.924):/board_org[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(0.803):/board_org[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.11 \n\t lvname = boardlv \n\t label = /board_org \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:47 2022\n \t time modified = Sat Jan 28 17:10:40 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(0.925):/board_org[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(0.925):/oracle[activate_fs_process_group:527] PS4_LOOP=/oracle +epprd_rg:cl_activate_fs(0.925):/oracle[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(0.925):/oracle[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(0.925):/oracle[activate_fs_process_group:540] fs_mount /oracle fsck epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:69] FS=/oracle +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:84] : point in /etc/filesystems. 
+epprd_rg:cl_activate_fs(0.925):/oracle[fs_mount:86] lsfs -c /oracle +epprd_rg:cl_activate_fs(0.926):/oracle[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(0.931):/oracle[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(0.926):/oracle[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle:/dev/oraclelv:jfs2:::41943040:rw:no:no' +epprd_rg:cl_activate_fs(0.931):/oracle[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(0.931):/oracle[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(0.932):/oracle[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(0.926):/oracle[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle:/dev/oraclelv:jfs2:::41943040:rw:no:no' +epprd_rg:cl_activate_fs(0.933):/oracle[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(0.933):/oracle[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(0.934):/oracle[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(0.935):/oracle[fs_mount:100] LV_name=oraclelv +epprd_rg:cl_activate_fs(0.935):/oracle[fs_mount:101] getlvcb -T -A oraclelv +epprd_rg:cl_activate_fs(0.936):/oracle[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(0.953):/oracle[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(0.936):/oracle[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.4 \n\t lvname = oraclelv \n\t label = /oracle \n\t machine id = 44AF14B00 \n\t number lps = 40 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:42 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(0.953):/oracle[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(0.953):/oracle[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(0.954):/oracle[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(0.936):/oracle[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.4 \n\t lvname = oraclelv \n\t label = /oracle \n\t machine id = 44AF14B00 \n\t number lps = 40 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:42 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(0.955):/oracle[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(0.955):/oracle[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(0.957):/oracle[fs_mount:115] clodmget -q 'name = oraclelv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(0.960):/oracle[fs_mount:115] CuAt_label=/oracle +epprd_rg:cl_activate_fs(0.960):/oracle[fs_mount:118] : At this point, if things are working correctly, /oracle from /etc/filesystems +epprd_rg:cl_activate_fs(0.960):/oracle[fs_mount:119] : should match /oracle from CuAt ODM and /oracle from the LVCB +epprd_rg:cl_activate_fs(0.960):/oracle[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(0.960):/oracle[fs_mount:121] : were done in clvaryonvg. 
+epprd_rg:cl_activate_fs(0.960):/oracle[fs_mount:123] [[ /oracle != /oracle ]] +epprd_rg:cl_activate_fs(0.960):/oracle[fs_mount:128] [[ /oracle != /oracle ]] +epprd_rg:cl_activate_fs(0.960):/oracle[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(0.960):/oracle[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(0.961):/oracle[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(0.980):/oracle[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(0.980):/oracle[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(0.980):/oracle[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle' +epprd_rg:cl_activate_fs(0.980):/oracle[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(0.981):/oracle[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.005):/oracle[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.008):/oracle[amlog_trace:319] DATE=2023-09-30T03:25:21.338917 +epprd_rg:cl_activate_fs(1.008):/oracle[amlog_trace:320] echo '|2023-09-30T03:25:21.338917|INFO: Activating Filesystem|/oracle' +epprd_rg:cl_activate_fs(1.008):/oracle[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.008):/oracle[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.011):/oracle[fs_mount:162] : Try to mount filesystem /oracle at Sep 30 03:25:21.000 +epprd_rg:cl_activate_fs(1.011):/oracle[fs_mount:163] mount /oracle +epprd_rg:cl_activate_fs(1.023):/oracle[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.023):/oracle[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.023):/oracle[fs_mount:220] : if we are 
running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.023):/oracle[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle' +epprd_rg:cl_activate_fs(1.023):/oracle[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.024):/oracle[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.048):/oracle[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.051):/oracle[amlog_trace:319] DATE=2023-09-30T03:25:21.381514 +epprd_rg:cl_activate_fs(1.051):/oracle[amlog_trace:320] echo '|2023-09-30T03:25:21.381514|INFO: Activating Filesystems completed|/oracle' +epprd_rg:cl_activate_fs(1.051):/oracle[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.051):/oracle[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.051):/oracle[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.051):/oracle[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.051):/oracle[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.051):/oracle[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.051):/oracle[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.051):/oracle[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.051):/oracle[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.051):/oracle[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.051):/oracle[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.052):/oracle[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.053):/oracle[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.055):/oracle[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.055):/oracle[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(1.055):/oracle[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.055):/oracle[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.055):/oracle[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.055):/oracle[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.055):/oracle[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.055):/oracle[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(0.936):/oracle[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.4 \n\t lvname = oraclelv \n\t label = /oracle \n\t machine id = 44AF14B00 \n\t number lps = 40 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:42 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.055):/oracle[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[activate_fs_process_group:540] fs_mount /oracle/EPP fsck epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:69] FS=/oracle/EPP 
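Each clwparroot excursion above boils down to a single ODM lookup: loadWparName reads the WPAR_NAME resource for the group, and an empty answer makes clwparroot exit 0 with no root path, so fs_mount mounts in the global environment rather than inside a WPAR. The equivalent check in isolation, using the same clodmget call as the trace:

wparName=$(clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource)
if [[ -z $wparName ]]; then
    echo "resource group is not WPAR-enabled; mounting in the global environment"
fi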
+epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.055):/oracle/EPP[fs_mount:86] lsfs -c /oracle/EPP +epprd_rg:cl_activate_fs(1.056):/oracle/EPP[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.061):/oracle/EPP[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.056):/oracle/EPP[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP:/dev/epplv:jfs2:::62914560:rw:no:no' +epprd_rg:cl_activate_fs(1.061):/oracle/EPP[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.061):/oracle/EPP[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.062):/oracle/EPP[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.056):/oracle/EPP[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP:/dev/epplv:jfs2:::62914560:rw:no:no' +epprd_rg:cl_activate_fs(1.063):/oracle/EPP[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.064):/oracle/EPP[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.064):/oracle/EPP[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.065):/oracle/EPP[fs_mount:100] LV_name=epplv +epprd_rg:cl_activate_fs(1.065):/oracle/EPP[fs_mount:101] getlvcb -T -A epplv +epprd_rg:cl_activate_fs(1.066):/oracle/EPP[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.083):/oracle/EPP[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.066):/oracle/EPP[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.5 \n\t lvname = epplv \n\t label = /oracle/EPP \n\t machine id = 44AF14B00 \n\t number lps = 60 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(1.084):/oracle/EPP[fs_mount:102] RC=0 
+epprd_rg:cl_activate_fs(1.084):/oracle/EPP[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.085):/oracle/EPP[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.066):/oracle/EPP[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.5 \n\t lvname = epplv \n\t label = /oracle/EPP \n\t machine id = 44AF14B00 \n\t number lps = 60 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(1.085):/oracle/EPP[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.086):/oracle/EPP[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.088):/oracle/EPP[fs_mount:115] clodmget -q 'name = epplv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.091):/oracle/EPP[fs_mount:115] CuAt_label=/oracle/EPP +epprd_rg:cl_activate_fs(1.091):/oracle/EPP[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP from /etc/filesystems +epprd_rg:cl_activate_fs(1.091):/oracle/EPP[fs_mount:119] : should match /oracle/EPP from CuAt ODM and /oracle/EPP from the LVCB +epprd_rg:cl_activate_fs(1.091):/oracle/EPP[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.091):/oracle/EPP[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.091):/oracle/EPP[fs_mount:123] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:cl_activate_fs(1.091):/oracle/EPP[fs_mount:128] [[ /oracle/EPP != /oracle/EPP ]] +epprd_rg:cl_activate_fs(1.091):/oracle/EPP[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.091):/oracle/EPP[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.091):/oracle/EPP[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.112):/oracle/EPP[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.112):/oracle/EPP[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.112):/oracle/EPP[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP' +epprd_rg:cl_activate_fs(1.112):/oracle/EPP[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.113):/oracle/EPP[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.137):/oracle/EPP[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.140):/oracle/EPP[amlog_trace:319] DATE=2023-09-30T03:25:21.470893 +epprd_rg:cl_activate_fs(1.140):/oracle/EPP[amlog_trace:320] echo '|2023-09-30T03:25:21.470893|INFO: Activating Filesystem|/oracle/EPP' +epprd_rg:cl_activate_fs(1.140):/oracle/EPP[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.140):/oracle/EPP[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.143):/oracle/EPP[fs_mount:162] : Try to mount filesystem /oracle/EPP at Sep 30 03:25:21.000 +epprd_rg:cl_activate_fs(1.143):/oracle/EPP[fs_mount:163] mount /oracle/EPP +epprd_rg:cl_activate_fs(1.155):/oracle/EPP[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.155):/oracle/EPP[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.155):/oracle/EPP[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.155):/oracle/EPP[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP' +epprd_rg:cl_activate_fs(1.155):/oracle/EPP[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.156):/oracle/EPP[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.180):/oracle/EPP[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.183):/oracle/EPP[amlog_trace:319] 
DATE=2023-09-30T03:25:21.513606 +epprd_rg:cl_activate_fs(1.183):/oracle/EPP[amlog_trace:320] echo '|2023-09-30T03:25:21.513606|INFO: Activating Filesystems completed|/oracle/EPP' +epprd_rg:cl_activate_fs(1.183):/oracle/EPP[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.183):/oracle/EPP[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.183):/oracle/EPP[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.183):/oracle/EPP[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.183):/oracle/EPP[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.183):/oracle/EPP[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.183):/oracle/EPP[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.183):/oracle/EPP[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.183):/oracle/EPP[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.183):/oracle/EPP[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.183):/oracle/EPP[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.184):/oracle/EPP[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.185):/oracle/EPP[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.187):/oracle/EPP[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.187):/oracle/EPP[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(1.187):/oracle/EPP[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.187):/oracle/EPP[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.187):/oracle/EPP[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.187):/oracle/EPP[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.187):/oracle/EPP[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.187):/oracle/EPP[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.066):/oracle/EPP[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.5 \n\t lvname = epplv \n\t label = /oracle/EPP \n\t machine id = 44AF14B00 \n\t number lps = 60 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.187):/oracle/EPP[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[activate_fs_process_group:540] fs_mount /oracle/EPP/mirrlogA fsck epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:69] FS=/oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:71] 
TMP_FILENAME=epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.187):/oracle/EPP/mirrlogA[fs_mount:86] lsfs -c /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.188):/oracle/EPP/mirrlogA[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.193):/oracle/EPP/mirrlogA[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.189):/oracle/EPP/mirrlogA[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogA:/dev/mirrlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.193):/oracle/EPP/mirrlogA[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.193):/oracle/EPP/mirrlogA[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.194):/oracle/EPP/mirrlogA[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.189):/oracle/EPP/mirrlogA[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogA:/dev/mirrlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.195):/oracle/EPP/mirrlogA[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.196):/oracle/EPP/mirrlogA[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.196):/oracle/EPP/mirrlogA[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.197):/oracle/EPP/mirrlogA[fs_mount:100] LV_name=mirrlogAlv +epprd_rg:cl_activate_fs(1.197):/oracle/EPP/mirrlogA[fs_mount:101] getlvcb -T -A mirrlogAlv +epprd_rg:cl_activate_fs(1.198):/oracle/EPP/mirrlogA[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.216):/oracle/EPP/mirrlogA[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.199):/oracle/EPP/mirrlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.14 \n\t lvname = mirrlogAlv \n\t label = /oracle/EPP/mirrlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = 
Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(1.216):/oracle/EPP/mirrlogA[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.216):/oracle/EPP/mirrlogA[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.217):/oracle/EPP/mirrlogA[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.199):/oracle/EPP/mirrlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.14 \n\t lvname = mirrlogAlv \n\t label = /oracle/EPP/mirrlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' +epprd_rg:cl_activate_fs(1.218):/oracle/EPP/mirrlogA[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.219):/oracle/EPP/mirrlogA[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.220):/oracle/EPP/mirrlogA[fs_mount:115] clodmget -q 'name = mirrlogAlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.223):/oracle/EPP/mirrlogA[fs_mount:115] CuAt_label=/oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.223):/oracle/EPP/mirrlogA[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/mirrlogA from /etc/filesystems +epprd_rg:cl_activate_fs(1.224):/oracle/EPP/mirrlogA[fs_mount:119] : should match /oracle/EPP/mirrlogA from CuAt ODM and /oracle/EPP/mirrlogA from the LVCB +epprd_rg:cl_activate_fs(1.224):/oracle/EPP/mirrlogA[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.224):/oracle/EPP/mirrlogA[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.224):/oracle/EPP/mirrlogA[fs_mount:123] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:cl_activate_fs(1.224):/oracle/EPP/mirrlogA[fs_mount:128] [[ /oracle/EPP/mirrlogA != /oracle/EPP/mirrlogA ]] +epprd_rg:cl_activate_fs(1.224):/oracle/EPP/mirrlogA[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.224):/oracle/EPP/mirrlogA[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.224):/oracle/EPP/mirrlogA[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.244):/oracle/EPP/mirrlogA[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.244):/oracle/EPP/mirrlogA[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.244):/oracle/EPP/mirrlogA[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_activate_fs(1.244):/oracle/EPP/mirrlogA[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.245):/oracle/EPP/mirrlogA[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.269):/oracle/EPP/mirrlogA[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.272):/oracle/EPP/mirrlogA[amlog_trace:319] DATE=2023-09-30T03:25:21.602341 +epprd_rg:cl_activate_fs(1.272):/oracle/EPP/mirrlogA[amlog_trace:320] echo '|2023-09-30T03:25:21.602341|INFO: Activating Filesystem|/oracle/EPP/mirrlogA' +epprd_rg:cl_activate_fs(1.272):/oracle/EPP/mirrlogA[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.272):/oracle/EPP/mirrlogA[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.274):/oracle/EPP/mirrlogA[fs_mount:162] : Try to mount filesystem /oracle/EPP/mirrlogA at Sep 30 03:25:21.000 +epprd_rg:cl_activate_fs(1.274):/oracle/EPP/mirrlogA[fs_mount:163] mount /oracle/EPP/mirrlogA +epprd_rg:cl_activate_fs(1.287):/oracle/EPP/mirrlogA[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.287):/oracle/EPP/mirrlogA[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.287):/oracle/EPP/mirrlogA[fs_mount:220] : if we are running on an AIX level that suppors it +epprd_rg:cl_activate_fs(1.287):/oracle/EPP/mirrlogA[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/mirrlogA' +epprd_rg:cl_activate_fs(1.287):/oracle/EPP/mirrlogA[amlog_trace:318] clcycle clavailability.log 
+epprd_rg:cl_activate_fs(1.288):/oracle/EPP/mirrlogA[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.312):/oracle/EPP/mirrlogA[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.315):/oracle/EPP/mirrlogA[amlog_trace:319] DATE=2023-09-30T03:25:21.645442 +epprd_rg:cl_activate_fs(1.315):/oracle/EPP/mirrlogA[amlog_trace:320] echo '|2023-09-30T03:25:21.645442|INFO: Activating Filesystems completed|/oracle/EPP/mirrlogA' +epprd_rg:cl_activate_fs(1.315):/oracle/EPP/mirrlogA[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.315):/oracle/EPP/mirrlogA[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.315):/oracle/EPP/mirrlogA[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.315):/oracle/EPP/mirrlogA[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.315):/oracle/EPP/mirrlogA[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.315):/oracle/EPP/mirrlogA[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.315):/oracle/EPP/mirrlogA[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.315):/oracle/EPP/mirrlogA[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.315):/oracle/EPP/mirrlogA[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.315):/oracle/EPP/mirrlogA[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.315):/oracle/EPP/mirrlogA[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.316):/oracle/EPP/mirrlogA[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.317):/oracle/EPP/mirrlogA[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogA[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogA[fs_mount:236] IFS=. 
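Annotation: the VRMF computation traced around this point is a compact ksh idiom worth spelling out. lslpp reports the bos.rte.filesystem level as dotted V.R.M.F, and the script zero-pads R, M and F to fixed widths so the concatenation becomes a single integer that compares reliably. A minimal standalone sketch of the same pattern (a reconstruction for illustration, not the PowerHA source itself):

    typeset -li V R M F            # read the four dotted fields as integers
    typeset -Z2 R                  # release zero-padded to 2 digits
    typeset -Z3 M                  # modification zero-padded to 3 digits
    typeset -Z3 F                  # fix zero-padded to 3 digits
    typeset -li VRMF=0
    lslpp -lcqOr bos.rte.filesystem | cut -f3 -d: | IFS=. read V R M F
    VRMF=$V$R$M$F                  # e.g. 7.2.5.102 -> 702005102
    # mountguard needs AIX 6.1 TL7+ or 7.1 TL1+, which is exactly what the
    # two arithmetic tests in the trace encode:
    (( V == 6 && VRMF >= 601007000 )) || (( V == 7 && VRMF >= 701001000 )) && print 'mountguard available'

This works without a subshell because ksh runs the last element of a pipeline in the current shell, so the read populates V, R, M and F in place, just as the trace shows.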
+epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogA[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogA[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogA[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogA[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogA[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogA[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.199):/oracle/EPP/mirrlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.14 \n\t lvname = mirrlogAlv \n\t label = /oracle/EPP/mirrlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Jan 28 17:10:41 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogA[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[activate_fs_process_group:540] fs_mount /oracle/EPP/mirrlogB fsck epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:69] FS=/oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.319):/oracle/EPP/mirrlogB[fs_mount:86] lsfs -c /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.320):/oracle/EPP/mirrlogB[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.320):/oracle/EPP/mirrlogB[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogB:/dev/mirrlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.325):/oracle/EPP/mirrlogB[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.326):/oracle/EPP/mirrlogB[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.320):/oracle/EPP/mirrlogB[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/mirrlogB:/dev/mirrlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.327):/oracle/EPP/mirrlogB[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.328):/oracle/EPP/mirrlogB[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.328):/oracle/EPP/mirrlogB[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.329):/oracle/EPP/mirrlogB[fs_mount:100] LV_name=mirrlogBlv +epprd_rg:cl_activate_fs(1.329):/oracle/EPP/mirrlogB[fs_mount:101] getlvcb -T -A mirrlogBlv +epprd_rg:cl_activate_fs(1.330):/oracle/EPP/mirrlogB[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.348):/oracle/EPP/mirrlogB[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.330):/oracle/EPP/mirrlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.15 \n\t lvname = mirrlogBlv \n\t label = /oracle/EPP/mirrlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:50 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.348):/oracle/EPP/mirrlogB[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.348):/oracle/EPP/mirrlogB[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.349):/oracle/EPP/mirrlogB[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.330):/oracle/EPP/mirrlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.15 \n\t lvname = mirrlogBlv \n\t label = /oracle/EPP/mirrlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:50 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.349):/oracle/EPP/mirrlogB[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.350):/oracle/EPP/mirrlogB[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.352):/oracle/EPP/mirrlogB[fs_mount:115] clodmget -q 'name = mirrlogBlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.355):/oracle/EPP/mirrlogB[fs_mount:115] CuAt_label=/oracle/EPP/mirrlogB 
+epprd_rg:cl_activate_fs(1.355):/oracle/EPP/mirrlogB[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/mirrlogB from /etc/filesystems +epprd_rg:cl_activate_fs(1.355):/oracle/EPP/mirrlogB[fs_mount:119] : should match /oracle/EPP/mirrlogB from CuAt ODM and /oracle/EPP/mirrlogB from the LVCB +epprd_rg:cl_activate_fs(1.355):/oracle/EPP/mirrlogB[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.355):/oracle/EPP/mirrlogB[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.355):/oracle/EPP/mirrlogB[fs_mount:123] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:cl_activate_fs(1.355):/oracle/EPP/mirrlogB[fs_mount:128] [[ /oracle/EPP/mirrlogB != /oracle/EPP/mirrlogB ]] +epprd_rg:cl_activate_fs(1.355):/oracle/EPP/mirrlogB[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.355):/oracle/EPP/mirrlogB[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.355):/oracle/EPP/mirrlogB[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.375):/oracle/EPP/mirrlogB[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.375):/oracle/EPP/mirrlogB[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.375):/oracle/EPP/mirrlogB[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_activate_fs(1.375):/oracle/EPP/mirrlogB[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.376):/oracle/EPP/mirrlogB[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.400):/oracle/EPP/mirrlogB[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.403):/oracle/EPP/mirrlogB[amlog_trace:319] 
DATE=2023-09-30T03:25:21.733944 +epprd_rg:cl_activate_fs(1.403):/oracle/EPP/mirrlogB[amlog_trace:320] echo '|2023-09-30T03:25:21.733944|INFO: Activating Filesystem|/oracle/EPP/mirrlogB' +epprd_rg:cl_activate_fs(1.403):/oracle/EPP/mirrlogB[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.403):/oracle/EPP/mirrlogB[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.406):/oracle/EPP/mirrlogB[fs_mount:162] : Try to mount filesystem /oracle/EPP/mirrlogB at Sep 30 03:25:21.000 +epprd_rg:cl_activate_fs(1.406):/oracle/EPP/mirrlogB[fs_mount:163] mount /oracle/EPP/mirrlogB +epprd_rg:cl_activate_fs(1.418):/oracle/EPP/mirrlogB[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.418):/oracle/EPP/mirrlogB[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.418):/oracle/EPP/mirrlogB[fs_mount:220] : if we are running on an AIX level that supports it +epprd_rg:cl_activate_fs(1.418):/oracle/EPP/mirrlogB[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/mirrlogB' +epprd_rg:cl_activate_fs(1.418):/oracle/EPP/mirrlogB[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.419):/oracle/EPP/mirrlogB[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.443):/oracle/EPP/mirrlogB[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.446):/oracle/EPP/mirrlogB[amlog_trace:319] DATE=2023-09-30T03:25:21.776628 +epprd_rg:cl_activate_fs(1.446):/oracle/EPP/mirrlogB[amlog_trace:320] echo '|2023-09-30T03:25:21.776628|INFO: Activating Filesystems completed|/oracle/EPP/mirrlogB' +epprd_rg:cl_activate_fs(1.446):/oracle/EPP/mirrlogB[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.446):/oracle/EPP/mirrlogB[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.446):/oracle/EPP/mirrlogB[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.446):/oracle/EPP/mirrlogB[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.446):/oracle/EPP/mirrlogB[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.446):/oracle/EPP/mirrlogB[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.446):/oracle/EPP/mirrlogB[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.446):/oracle/EPP/mirrlogB[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.446):/oracle/EPP/mirrlogB[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.446):/oracle/EPP/mirrlogB[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.446):/oracle/EPP/mirrlogB[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.447):/oracle/EPP/mirrlogB[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.448):/oracle/EPP/mirrlogB[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/mirrlogB[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/mirrlogB[fs_mount:236] IFS=.
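Annotation: each mount in this log is bracketed by a pair of amlog_trace calls, and their traced bodies (lines 318 to 320 of the script) show the availability-log record format: rotate clavailability.log if due, timestamp with cltime, then append a pipe-delimited line. An approximate reconstruction of the helper's shape, inferred from the trace rather than copied from the shipped function:

    amlog_trace()
    {
        # $2 arrives as "<action>|<object>"; $1 is empty in every call seen here
        clcycle clavailability.log > /dev/null 2>&1     # rotate the log if needed
        DATE=$(cltime)                                  # e.g. 2023-09-30T03:25:21.776628
        echo "|$DATE|INFO: $2" >> /var/hacmp/availability/clavailability.log
    }

    amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/mirrlogB'

The result is a record such as |2023-09-30T03:25:21.776628|INFO: Activating Filesystems completed|/oracle/EPP/mirrlogB, which availability-reporting tools can split cleanly on the '|' delimiter.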
+epprd_rg:cl_activate_fs(1.450):/oracle/EPP/mirrlogB[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/mirrlogB[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/mirrlogB[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/mirrlogB[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/mirrlogB[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/mirrlogB[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.330):/oracle/EPP/mirrlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.15 \n\t lvname = mirrlogBlv \n\t label = /oracle/EPP/mirrlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:50 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/mirrlogB[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[activate_fs_process_group:540] fs_mount /oracle/EPP/oraarch fsck epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:69] FS=/oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.450):/oracle/EPP/oraarch[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.451):/oracle/EPP/oraarch[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.451):/oracle/EPP/oraarch[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.451):/oracle/EPP/oraarch[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.451):/oracle/EPP/oraarch[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.451):/oracle/EPP/oraarch[fs_mount:86] lsfs -c /oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.452):/oracle/EPP/oraarch[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.456):/oracle/EPP/oraarch[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.452):/oracle/EPP/oraarch[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/oraarch:/dev/oraarchlv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.456):/oracle/EPP/oraarch[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.456):/oracle/EPP/oraarch[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.457):/oracle/EPP/oraarch[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.452):/oracle/EPP/oraarch[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/oraarch:/dev/oraarchlv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.459):/oracle/EPP/oraarch[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.459):/oracle/EPP/oraarch[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.459):/oracle/EPP/oraarch[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.460):/oracle/EPP/oraarch[fs_mount:100] LV_name=oraarchlv +epprd_rg:cl_activate_fs(1.460):/oracle/EPP/oraarch[fs_mount:101] getlvcb -T -A oraarchlv +epprd_rg:cl_activate_fs(1.461):/oracle/EPP/oraarch[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.479):/oracle/EPP/oraarch[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.462):/oracle/EPP/oraarch[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.6 \n\t lvname = oraarchlv \n\t label = /oracle/EPP/oraarch \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.479):/oracle/EPP/oraarch[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.479):/oracle/EPP/oraarch[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.480):/oracle/EPP/oraarch[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.462):/oracle/EPP/oraarch[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.6 \n\t lvname = oraarchlv \n\t label = /oracle/EPP/oraarch \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.481):/oracle/EPP/oraarch[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.482):/oracle/EPP/oraarch[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.483):/oracle/EPP/oraarch[fs_mount:115] clodmget -q 'name = oraarchlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.487):/oracle/EPP/oraarch[fs_mount:115] CuAt_label=/oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.487):/oracle/EPP/oraarch[fs_mount:118] : 
At this point, if things are working correctly, /oracle/EPP/oraarch from /etc/filesystems +epprd_rg:cl_activate_fs(1.487):/oracle/EPP/oraarch[fs_mount:119] : should match /oracle/EPP/oraarch from CuAt ODM and /oracle/EPP/oraarch from the LVCB +epprd_rg:cl_activate_fs(1.487):/oracle/EPP/oraarch[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.487):/oracle/EPP/oraarch[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.487):/oracle/EPP/oraarch[fs_mount:123] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:cl_activate_fs(1.487):/oracle/EPP/oraarch[fs_mount:128] [[ /oracle/EPP/oraarch != /oracle/EPP/oraarch ]] +epprd_rg:cl_activate_fs(1.487):/oracle/EPP/oraarch[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.487):/oracle/EPP/oraarch[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.487):/oracle/EPP/oraarch[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.507):/oracle/EPP/oraarch[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.507):/oracle/EPP/oraarch[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.507):/oracle/EPP/oraarch[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_activate_fs(1.507):/oracle/EPP/oraarch[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.508):/oracle/EPP/oraarch[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.532):/oracle/EPP/oraarch[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.535):/oracle/EPP/oraarch[amlog_trace:319] DATE=2023-09-30T03:25:21.865813 
+epprd_rg:cl_activate_fs(1.535):/oracle/EPP/oraarch[amlog_trace:320] echo '|2023-09-30T03:25:21.865813|INFO: Activating Filesystem|/oracle/EPP/oraarch' +epprd_rg:cl_activate_fs(1.535):/oracle/EPP/oraarch[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.535):/oracle/EPP/oraarch[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.538):/oracle/EPP/oraarch[fs_mount:162] : Try to mount filesystem /oracle/EPP/oraarch at Sep 30 03:25:21.000 +epprd_rg:cl_activate_fs(1.538):/oracle/EPP/oraarch[fs_mount:163] mount /oracle/EPP/oraarch +epprd_rg:cl_activate_fs(1.550):/oracle/EPP/oraarch[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.550):/oracle/EPP/oraarch[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.550):/oracle/EPP/oraarch[fs_mount:220] : if we are running on an AIX level that supports it +epprd_rg:cl_activate_fs(1.550):/oracle/EPP/oraarch[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/oraarch' +epprd_rg:cl_activate_fs(1.550):/oracle/EPP/oraarch[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.551):/oracle/EPP/oraarch[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.575):/oracle/EPP/oraarch[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.578):/oracle/EPP/oraarch[amlog_trace:319] DATE=2023-09-30T03:25:21.908542 +epprd_rg:cl_activate_fs(1.578):/oracle/EPP/oraarch[amlog_trace:320] echo '|2023-09-30T03:25:21.908542|INFO: Activating Filesystems completed|/oracle/EPP/oraarch' +epprd_rg:cl_activate_fs(1.578):/oracle/EPP/oraarch[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.578):/oracle/EPP/oraarch[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.578):/oracle/EPP/oraarch[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.578):/oracle/EPP/oraarch[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.578):/oracle/EPP/oraarch[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.578):/oracle/EPP/oraarch[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.578):/oracle/EPP/oraarch[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.578):/oracle/EPP/oraarch[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.578):/oracle/EPP/oraarch[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.578):/oracle/EPP/oraarch[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.578):/oracle/EPP/oraarch[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.579):/oracle/EPP/oraarch[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.580):/oracle/EPP/oraarch[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/oraarch[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/oraarch[fs_mount:236] IFS=.
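Annotation: a few statements below, fs_mount tests the LVCB fs string against the pattern *mountguard=yes*. Every LVCB in this log already carries mountguard=yes, so the function returns without touching the volume group; the interesting part is the branch that is never taken here. Presumably (an assumption, since the branch body does not appear in this trace) it enables the guard with chfs, which is the documented AIX interface for the attribute:

    # sketch of the not-taken branch; LVCB_info and FS as in the trace above
    if [[ $LVCB_info != *mountguard=yes* ]]
    then
        # chfs rewrites the LVCB, which changes the VG timestamp;
        # hence the script comment that this should run only once
        chfs -a mountguard=yes $FS
    fi

With the guard set, JFS2 refuses to mount the filesystem on a second node while it is still mounted elsewhere, which is the double-mount protection the traced comments refer to.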
+epprd_rg:cl_activate_fs(1.582):/oracle/EPP/oraarch[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/oraarch[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/oraarch[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/oraarch[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/oraarch[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/oraarch[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.462):/oracle/EPP/oraarch[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.6 \n\t lvname = oraarchlv \n\t label = /oracle/EPP/oraarch \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:43 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/oraarch[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/origlogA +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[activate_fs_process_group:540] fs_mount /oracle/EPP/origlogA fsck epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:69] FS=/oracle/EPP/origlogA +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/origlogA +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.582):/oracle/EPP/origlogA[fs_mount:86] lsfs -c /oracle/EPP/origlogA +epprd_rg:cl_activate_fs(1.583):/oracle/EPP/origlogA[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.588):/oracle/EPP/origlogA[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.584):/oracle/EPP/origlogA[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogA:/dev/origlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.588):/oracle/EPP/origlogA[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.588):/oracle/EPP/origlogA[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.589):/oracle/EPP/origlogA[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.584):/oracle/EPP/origlogA[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogA:/dev/origlogAlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.591):/oracle/EPP/origlogA[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.591):/oracle/EPP/origlogA[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.591):/oracle/EPP/origlogA[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.592):/oracle/EPP/origlogA[fs_mount:100] LV_name=origlogAlv +epprd_rg:cl_activate_fs(1.592):/oracle/EPP/origlogA[fs_mount:101] getlvcb -T -A origlogAlv +epprd_rg:cl_activate_fs(1.593):/oracle/EPP/origlogA[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.611):/oracle/EPP/origlogA[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.594):/oracle/EPP/origlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.12 \n\t lvname = origlogAlv \n\t label = /oracle/EPP/origlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:48 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.612):/oracle/EPP/origlogA[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.612):/oracle/EPP/origlogA[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.613):/oracle/EPP/origlogA[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.594):/oracle/EPP/origlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.12 \n\t lvname = origlogAlv \n\t label = /oracle/EPP/origlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:48 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.614):/oracle/EPP/origlogA[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.614):/oracle/EPP/origlogA[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.616):/oracle/EPP/origlogA[fs_mount:115] clodmget -q 'name = origlogAlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.619):/oracle/EPP/origlogA[fs_mount:115] CuAt_label=/oracle/EPP/origlogA 
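Annotation: right after this label check, fs_mount again shells out to clwparroot, seen next in the trace, to ask whether epprd_rg is hosted in a WPAR. The repeated clwparroot/loadWparName exchange reduces to one ODM query; a condensed sketch using the commands and names from the trace:

    # does the resource group define a WPAR name?
    wparName=$(clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource)
    if [[ -z $wparName ]]
    then
        print -- ''    # no WPAR configured: clwparroot prints nothing and
                       # exits 0, so fs_mount continues with WPAR_ROOT=''
    fi
    # the root-path lookup for a non-empty WPAR name is never exercised here

Because the query comes back empty every time, each filesystem in this section is mounted at its real mount point rather than under a WPAR root.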
+epprd_rg:cl_activate_fs(1.619):/oracle/EPP/origlogA[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/origlogA from /etc/filesystems +epprd_rg:cl_activate_fs(1.619):/oracle/EPP/origlogA[fs_mount:119] : should match /oracle/EPP/origlogA from CuAt ODM and /oracle/EPP/origlogA from the LVCB +epprd_rg:cl_activate_fs(1.619):/oracle/EPP/origlogA[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.619):/oracle/EPP/origlogA[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.619):/oracle/EPP/origlogA[fs_mount:123] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:cl_activate_fs(1.619):/oracle/EPP/origlogA[fs_mount:128] [[ /oracle/EPP/origlogA != /oracle/EPP/origlogA ]] +epprd_rg:cl_activate_fs(1.619):/oracle/EPP/origlogA[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.619):/oracle/EPP/origlogA[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.619):/oracle/EPP/origlogA[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.639):/oracle/EPP/origlogA[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.639):/oracle/EPP/origlogA[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.639):/oracle/EPP/origlogA[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_activate_fs(1.639):/oracle/EPP/origlogA[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.640):/oracle/EPP/origlogA[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.665):/oracle/EPP/origlogA[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.667):/oracle/EPP/origlogA[amlog_trace:319] 
DATE=2023-09-30T03:25:21.998053 +epprd_rg:cl_activate_fs(1.667):/oracle/EPP/origlogA[amlog_trace:320] echo '|2023-09-30T03:25:21.998053|INFO: Activating Filesystem|/oracle/EPP/origlogA' +epprd_rg:cl_activate_fs(1.667):/oracle/EPP/origlogA[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.668):/oracle/EPP/origlogA[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.670):/oracle/EPP/origlogA[fs_mount:162] : Try to mount filesystem /oracle/EPP/origlogA at Sep 30 03:25:21.000 +epprd_rg:cl_activate_fs(1.670):/oracle/EPP/origlogA[fs_mount:163] mount /oracle/EPP/origlogA +epprd_rg:cl_activate_fs(1.682):/oracle/EPP/origlogA[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.682):/oracle/EPP/origlogA[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.682):/oracle/EPP/origlogA[fs_mount:220] : if we are running on an AIX level that supports it +epprd_rg:cl_activate_fs(1.682):/oracle/EPP/origlogA[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/origlogA' +epprd_rg:cl_activate_fs(1.682):/oracle/EPP/origlogA[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.683):/oracle/EPP/origlogA[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.708):/oracle/EPP/origlogA[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.711):/oracle/EPP/origlogA[amlog_trace:319] DATE=2023-09-30T03:25:22.041302 +epprd_rg:cl_activate_fs(1.711):/oracle/EPP/origlogA[amlog_trace:320] echo '|2023-09-30T03:25:22.041302|INFO: Activating Filesystems completed|/oracle/EPP/origlogA' +epprd_rg:cl_activate_fs(1.711):/oracle/EPP/origlogA[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.711):/oracle/EPP/origlogA[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.711):/oracle/EPP/origlogA[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(1.711):/oracle/EPP/origlogA[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.711):/oracle/EPP/origlogA[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.711):/oracle/EPP/origlogA[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.711):/oracle/EPP/origlogA[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.711):/oracle/EPP/origlogA[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.711):/oracle/EPP/origlogA[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.711):/oracle/EPP/origlogA[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.711):/oracle/EPP/origlogA[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.712):/oracle/EPP/origlogA[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.713):/oracle/EPP/origlogA[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.714):/oracle/EPP/origlogA[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.714):/oracle/EPP/origlogA[fs_mount:236] IFS=.
+epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogA[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogA[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogA[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogA[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogA[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogA[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.594):/oracle/EPP/origlogA[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.12 \n\t lvname = origlogAlv \n\t label = /oracle/EPP/origlogA \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:48 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogA[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/origlogB +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[activate_fs_process_group:540] fs_mount /oracle/EPP/origlogB fsck epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:69] FS=/oracle/EPP/origlogB +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/origlogB +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.715):/oracle/EPP/origlogB[fs_mount:86] lsfs -c /oracle/EPP/origlogB +epprd_rg:cl_activate_fs(1.716):/oracle/EPP/origlogB[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.721):/oracle/EPP/origlogB[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.716):/oracle/EPP/origlogB[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogB:/dev/origlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.721):/oracle/EPP/origlogB[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.721):/oracle/EPP/origlogB[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.722):/oracle/EPP/origlogB[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.716):/oracle/EPP/origlogB[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/origlogB:/dev/origlogBlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs(1.723):/oracle/EPP/origlogB[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.724):/oracle/EPP/origlogB[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.724):/oracle/EPP/origlogB[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.725):/oracle/EPP/origlogB[fs_mount:100] LV_name=origlogBlv +epprd_rg:cl_activate_fs(1.725):/oracle/EPP/origlogB[fs_mount:101] getlvcb -T -A origlogBlv +epprd_rg:cl_activate_fs(1.726):/oracle/EPP/origlogB[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.745):/oracle/EPP/origlogB[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.727):/oracle/EPP/origlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.13 \n\t lvname = origlogBlv \n\t label = /oracle/EPP/origlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.745):/oracle/EPP/origlogB[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.745):/oracle/EPP/origlogB[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.746):/oracle/EPP/origlogB[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.727):/oracle/EPP/origlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.13 \n\t lvname = origlogBlv \n\t label = /oracle/EPP/origlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' +epprd_rg:cl_activate_fs(1.747):/oracle/EPP/origlogB[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.747):/oracle/EPP/origlogB[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.749):/oracle/EPP/origlogB[fs_mount:115] clodmget -q 'name = origlogBlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.752):/oracle/EPP/origlogB[fs_mount:115] CuAt_label=/oracle/EPP/origlogB 
+epprd_rg:cl_activate_fs(1.752):/oracle/EPP/origlogB[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/origlogB from /etc/filesystems +epprd_rg:cl_activate_fs(1.752):/oracle/EPP/origlogB[fs_mount:119] : should match /oracle/EPP/origlogB from CuAt ODM and /oracle/EPP/origlogB from the LVCB +epprd_rg:cl_activate_fs(1.752):/oracle/EPP/origlogB[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.752):/oracle/EPP/origlogB[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.752):/oracle/EPP/origlogB[fs_mount:123] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:cl_activate_fs(1.752):/oracle/EPP/origlogB[fs_mount:128] [[ /oracle/EPP/origlogB != /oracle/EPP/origlogB ]] +epprd_rg:cl_activate_fs(1.752):/oracle/EPP/origlogB[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.752):/oracle/EPP/origlogB[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.752):/oracle/EPP/origlogB[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.773):/oracle/EPP/origlogB[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.773):/oracle/EPP/origlogB[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.773):/oracle/EPP/origlogB[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_activate_fs(1.773):/oracle/EPP/origlogB[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.774):/oracle/EPP/origlogB[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.798):/oracle/EPP/origlogB[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.801):/oracle/EPP/origlogB[amlog_trace:319] 
DATE=2023-09-30T03:25:22.131646 +epprd_rg:cl_activate_fs(1.801):/oracle/EPP/origlogB[amlog_trace:320] echo '|2023-09-30T03:25:22.131646|INFO: Activating Filesystem|/oracle/EPP/origlogB' +epprd_rg:cl_activate_fs(1.801):/oracle/EPP/origlogB[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.801):/oracle/EPP/origlogB[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.804):/oracle/EPP/origlogB[fs_mount:162] : Try to mount filesystem /oracle/EPP/origlogB at Sep 30 03:25:22.000 +epprd_rg:cl_activate_fs(1.804):/oracle/EPP/origlogB[fs_mount:163] mount /oracle/EPP/origlogB +epprd_rg:cl_activate_fs(1.816):/oracle/EPP/origlogB[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.816):/oracle/EPP/origlogB[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.816):/oracle/EPP/origlogB[fs_mount:220] : if we are running on an AIX level that supports it +epprd_rg:cl_activate_fs(1.816):/oracle/EPP/origlogB[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/origlogB' +epprd_rg:cl_activate_fs(1.816):/oracle/EPP/origlogB[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.817):/oracle/EPP/origlogB[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.841):/oracle/EPP/origlogB[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.844):/oracle/EPP/origlogB[amlog_trace:319] DATE=2023-09-30T03:25:22.174580 +epprd_rg:cl_activate_fs(1.844):/oracle/EPP/origlogB[amlog_trace:320] echo '|2023-09-30T03:25:22.174580|INFO: Activating Filesystems completed|/oracle/EPP/origlogB' +epprd_rg:cl_activate_fs(1.844):/oracle/EPP/origlogB[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.844):/oracle/EPP/origlogB[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.844):/oracle/EPP/origlogB[fs_mount:226] : Each of the V, R, M and F fields is padded to fixed length, +epprd_rg:cl_activate_fs(1.844):/oracle/EPP/origlogB[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.844):/oracle/EPP/origlogB[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.844):/oracle/EPP/origlogB[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.844):/oracle/EPP/origlogB[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.844):/oracle/EPP/origlogB[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.844):/oracle/EPP/origlogB[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.844):/oracle/EPP/origlogB[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.844):/oracle/EPP/origlogB[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.845):/oracle/EPP/origlogB[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.846):/oracle/EPP/origlogB[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/origlogB[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/origlogB[fs_mount:236] IFS=.
+epprd_rg:cl_activate_fs(1.848):/oracle/EPP/origlogB[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/origlogB[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/origlogB[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/origlogB[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/origlogB[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/origlogB[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.727):/oracle/EPP/origlogB[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.13 \n\t lvname = origlogBlv \n\t label = /oracle/EPP/origlogB \n\t machine id = 44AF14B00 \n\t number lps = 10 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:49 2022\n \t time modified = Sat Jan 28 17:10:42 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/origlogB[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/sapdata1[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/sapdata1[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/sapdata1[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/sapdata1[activate_fs_process_group:540] fs_mount /oracle/EPP/sapdata1 fsck epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/sapdata1[fs_mount:69] FS=/oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/sapdata1[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/sapdata1[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.848):/oracle/EPP/sapdata1[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.849):/oracle/EPP/sapdata1[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(1.849):/oracle/EPP/sapdata1[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.849):/oracle/EPP/sapdata1[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.849):/oracle/EPP/sapdata1[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.849):/oracle/EPP/sapdata1[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.849):/oracle/EPP/sapdata1[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.849):/oracle/EPP/sapdata1[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.849):/oracle/EPP/sapdata1[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.849):/oracle/EPP/sapdata1[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.849):/oracle/EPP/sapdata1[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.849):/oracle/EPP/sapdata1[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.849):/oracle/EPP/sapdata1[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.849):/oracle/EPP/sapdata1[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.849):/oracle/EPP/sapdata1[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(1.849):/oracle/EPP/sapdata1[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.849):/oracle/EPP/sapdata1[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.849):/oracle/EPP/sapdata1[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.849):/oracle/EPP/sapdata1[fs_mount:86] lsfs -c /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(1.850):/oracle/EPP/sapdata1[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.855):/oracle/EPP/sapdata1[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.850):/oracle/EPP/sapdata1[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata1:/dev/sapdata1lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.855):/oracle/EPP/sapdata1[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.855):/oracle/EPP/sapdata1[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.856):/oracle/EPP/sapdata1[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.850):/oracle/EPP/sapdata1[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata1:/dev/sapdata1lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.857):/oracle/EPP/sapdata1[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.857):/oracle/EPP/sapdata1[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.857):/oracle/EPP/sapdata1[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.859):/oracle/EPP/sapdata1[fs_mount:100] LV_name=sapdata1lv +epprd_rg:cl_activate_fs(1.859):/oracle/EPP/sapdata1[fs_mount:101] getlvcb -T -A sapdata1lv +epprd_rg:cl_activate_fs(1.860):/oracle/EPP/sapdata1[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(1.878):/oracle/EPP/sapdata1[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.860):/oracle/EPP/sapdata1[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.7 \n\t lvname = sapdata1lv \n\t label = /oracle/EPP/sapdata1 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:44 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(1.878):/oracle/EPP/sapdata1[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(1.878):/oracle/EPP/sapdata1[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.879):/oracle/EPP/sapdata1[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.860):/oracle/EPP/sapdata1[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.7 \n\t lvname = sapdata1lv \n\t label = /oracle/EPP/sapdata1 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:44 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(1.880):/oracle/EPP/sapdata1[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(1.880):/oracle/EPP/sapdata1[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(1.882):/oracle/EPP/sapdata1[fs_mount:115] clodmget -q 'name = sapdata1lv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(1.885):/oracle/EPP/sapdata1[fs_mount:115] CuAt_label=/oracle/EPP/sapdata1 
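Before every mount in this trace, fs_mount collects the same mount-point label from three places: /etc/filesystems (via lsfs), the on-disk logical volume control block (via getlvcb), and the CuAt ODM class (via clodmget). A minimal ksh sketch of that lookup, using the utilities exactly as the trace invokes them; deriving LV_name from the device path is an assumption, since that step of fs_mount's parsing is not visible here.

    FS=/oracle/EPP/sapdata1                     # mount point, as in the pass above
    lsfs -c $FS 2>&1 | tail -1 | IFS=: read skip LV_dev_name vfs_type rest
    LV_name=${LV_dev_name##*/}                  # assumed: /dev/sapdata1lv -> sapdata1lv
    # label recorded in the on-disk LVCB
    getlvcb -T -A $LV_name 2>&1 | grep -w 'label =' | read skip skip LVCB_label
    # label recorded in the CuAt ODM class
    CuAt_label=$(clodmget -q "name = $LV_name and attribute = label" -f value -n CuAt)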
+epprd_rg:cl_activate_fs(1.885):/oracle/EPP/sapdata1[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/sapdata1 from /etc/filesystems +epprd_rg:cl_activate_fs(1.885):/oracle/EPP/sapdata1[fs_mount:119] : should match /oracle/EPP/sapdata1 from CuAt ODM and /oracle/EPP/sapdata1 from the LVCB +epprd_rg:cl_activate_fs(1.885):/oracle/EPP/sapdata1[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(1.885):/oracle/EPP/sapdata1[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(1.885):/oracle/EPP/sapdata1[fs_mount:123] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:cl_activate_fs(1.885):/oracle/EPP/sapdata1[fs_mount:128] [[ /oracle/EPP/sapdata1 != /oracle/EPP/sapdata1 ]] +epprd_rg:cl_activate_fs(1.885):/oracle/EPP/sapdata1[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.886):/oracle/EPP/sapdata1[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(1.886):/oracle/EPP/sapdata1[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(1.906):/oracle/EPP/sapdata1[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.906):/oracle/EPP/sapdata1[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(1.906):/oracle/EPP/sapdata1[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_activate_fs(1.906):/oracle/EPP/sapdata1[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.907):/oracle/EPP/sapdata1[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.931):/oracle/EPP/sapdata1[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.934):/oracle/EPP/sapdata1[amlog_trace:319] 
DATE=2023-09-30T03:25:22.264523 +epprd_rg:cl_activate_fs(1.934):/oracle/EPP/sapdata1[amlog_trace:320] echo '|2023-09-30T03:25:22.264523|INFO: Activating Filesystem|/oracle/EPP/sapdata1' +epprd_rg:cl_activate_fs(1.934):/oracle/EPP/sapdata1[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.934):/oracle/EPP/sapdata1[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(1.937):/oracle/EPP/sapdata1[fs_mount:162] : Try to mount filesystem /oracle/EPP/sapdata1 at Sep 30 03:25:22.000 +epprd_rg:cl_activate_fs(1.937):/oracle/EPP/sapdata1[fs_mount:163] mount /oracle/EPP/sapdata1 +epprd_rg:cl_activate_fs(1.949):/oracle/EPP/sapdata1[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(1.949):/oracle/EPP/sapdata1[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(1.949):/oracle/EPP/sapdata1[fs_mount:220] : if we are running on an AIX level that supports it +epprd_rg:cl_activate_fs(1.949):/oracle/EPP/sapdata1[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/sapdata1' +epprd_rg:cl_activate_fs(1.949):/oracle/EPP/sapdata1[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(1.950):/oracle/EPP/sapdata1[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(1.974):/oracle/EPP/sapdata1[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(1.977):/oracle/EPP/sapdata1[amlog_trace:319] DATE=2023-09-30T03:25:22.307553 +epprd_rg:cl_activate_fs(1.977):/oracle/EPP/sapdata1[amlog_trace:320] echo '|2023-09-30T03:25:22.307553|INFO: Activating Filesystems completed|/oracle/EPP/sapdata1' +epprd_rg:cl_activate_fs(1.977):/oracle/EPP/sapdata1[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(1.977):/oracle/EPP/sapdata1[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(1.977):/oracle/EPP/sapdata1[fs_mount:226] : Each of the V, R, M and F fields is padded to fixed length, +epprd_rg:cl_activate_fs(1.977):/oracle/EPP/sapdata1[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(1.977):/oracle/EPP/sapdata1[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(1.977):/oracle/EPP/sapdata1[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(1.977):/oracle/EPP/sapdata1[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(1.977):/oracle/EPP/sapdata1[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(1.977):/oracle/EPP/sapdata1[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(1.977):/oracle/EPP/sapdata1[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(1.977):/oracle/EPP/sapdata1[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(1.978):/oracle/EPP/sapdata1[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(1.979):/oracle/EPP/sapdata1[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata1[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata1[fs_mount:236] IFS=.
+epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata1[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata1[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata1[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata1[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata1[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata1[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.860):/oracle/EPP/sapdata1[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.7 \n\t lvname = sapdata1lv \n\t label = /oracle/EPP/sapdata1 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:44 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata1[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[activate_fs_process_group:540] fs_mount /oracle/EPP/sapdata2 fsck epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:69] FS=/oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(1.981):/oracle/EPP/sapdata2[fs_mount:86] lsfs -c /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(1.982):/oracle/EPP/sapdata2[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(1.987):/oracle/EPP/sapdata2[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(1.983):/oracle/EPP/sapdata2[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata2:/dev/sapdata2lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.987):/oracle/EPP/sapdata2[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(1.987):/oracle/EPP/sapdata2[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(1.988):/oracle/EPP/sapdata2[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(1.983):/oracle/EPP/sapdata2[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata2:/dev/sapdata2lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(1.989):/oracle/EPP/sapdata2[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(1.990):/oracle/EPP/sapdata2[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(1.990):/oracle/EPP/sapdata2[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(1.991):/oracle/EPP/sapdata2[fs_mount:100] LV_name=sapdata2lv +epprd_rg:cl_activate_fs(1.991):/oracle/EPP/sapdata2[fs_mount:101] getlvcb -T -A sapdata2lv +epprd_rg:cl_activate_fs(1.992):/oracle/EPP/sapdata2[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.014):/oracle/EPP/sapdata2[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(1.993):/oracle/EPP/sapdata2[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.8 \n\t lvname = sapdata2lv \n\t label = /oracle/EPP/sapdata2 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:45 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(2.014):/oracle/EPP/sapdata2[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.014):/oracle/EPP/sapdata2[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.015):/oracle/EPP/sapdata2[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(1.993):/oracle/EPP/sapdata2[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.8 \n\t lvname = sapdata2lv \n\t label = /oracle/EPP/sapdata2 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:45 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(2.016):/oracle/EPP/sapdata2[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.017):/oracle/EPP/sapdata2[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(2.018):/oracle/EPP/sapdata2[fs_mount:115] clodmget -q 'name = sapdata2lv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(2.022):/oracle/EPP/sapdata2[fs_mount:115] CuAt_label=/oracle/EPP/sapdata2 
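The tests at fs_mount:123 and fs_mount:128 then compare those labels pairwise; both read as [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] here because all three sources already agree. A self-contained ksh sketch of the check with the values from this pass; the warning branch is illustrative only, since the trace never enters it and fs_mount leaves actual repair to clvaryonvg.

    FS=/oracle/EPP/sapdata2                     # values as seen in this pass
    LVCB_label=/oracle/EPP/sapdata2
    CuAt_label=/oracle/EPP/sapdata2
    if [[ $CuAt_label != $FS || $LVCB_label != $FS ]]
    then
        # report only; clvaryonvg already made its best effort at recovery
        print -u2 "WARNING: label mismatch for $FS (CuAt=$CuAt_label, LVCB=$LVCB_label)"
    fi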
+epprd_rg:cl_activate_fs(2.022):/oracle/EPP/sapdata2[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/sapdata2 from /etc/filesystems +epprd_rg:cl_activate_fs(2.022):/oracle/EPP/sapdata2[fs_mount:119] : should match /oracle/EPP/sapdata2 from CuAt ODM and /oracle/EPP/sapdata2 from the LVCB +epprd_rg:cl_activate_fs(2.022):/oracle/EPP/sapdata2[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.022):/oracle/EPP/sapdata2[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(2.022):/oracle/EPP/sapdata2[fs_mount:123] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:cl_activate_fs(2.022):/oracle/EPP/sapdata2[fs_mount:128] [[ /oracle/EPP/sapdata2 != /oracle/EPP/sapdata2 ]] +epprd_rg:cl_activate_fs(2.022):/oracle/EPP/sapdata2[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.022):/oracle/EPP/sapdata2[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.022):/oracle/EPP/sapdata2[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.042):/oracle/EPP/sapdata2[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.042):/oracle/EPP/sapdata2[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.042):/oracle/EPP/sapdata2[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_activate_fs(2.042):/oracle/EPP/sapdata2[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.043):/oracle/EPP/sapdata2[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.067):/oracle/EPP/sapdata2[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.070):/oracle/EPP/sapdata2[amlog_trace:319] 
DATE=2023-09-30T03:25:22.400957 +epprd_rg:cl_activate_fs(2.070):/oracle/EPP/sapdata2[amlog_trace:320] echo '|2023-09-30T03:25:22.400957|INFO: Activating Filesystem|/oracle/EPP/sapdata2' +epprd_rg:cl_activate_fs(2.070):/oracle/EPP/sapdata2[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.070):/oracle/EPP/sapdata2[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata2[fs_mount:162] : Try to mount filesystem /oracle/EPP/sapdata2 at Sep 30 03:25:22.000 +epprd_rg:cl_activate_fs(2.073):/oracle/EPP/sapdata2[fs_mount:163] mount /oracle/EPP/sapdata2 +epprd_rg:cl_activate_fs(2.085):/oracle/EPP/sapdata2[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.085):/oracle/EPP/sapdata2[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.085):/oracle/EPP/sapdata2[fs_mount:220] : if we are running on an AIX level that supports it +epprd_rg:cl_activate_fs(2.085):/oracle/EPP/sapdata2[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/sapdata2' +epprd_rg:cl_activate_fs(2.085):/oracle/EPP/sapdata2[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.086):/oracle/EPP/sapdata2[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.110):/oracle/EPP/sapdata2[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.113):/oracle/EPP/sapdata2[amlog_trace:319] DATE=2023-09-30T03:25:22.443956 +epprd_rg:cl_activate_fs(2.113):/oracle/EPP/sapdata2[amlog_trace:320] echo '|2023-09-30T03:25:22.443956|INFO: Activating Filesystems completed|/oracle/EPP/sapdata2' +epprd_rg:cl_activate_fs(2.113):/oracle/EPP/sapdata2[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.113):/oracle/EPP/sapdata2[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.113):/oracle/EPP/sapdata2[fs_mount:226] : Each of the V, R, M and F fields is padded to fixed length, +epprd_rg:cl_activate_fs(2.113):/oracle/EPP/sapdata2[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.113):/oracle/EPP/sapdata2[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.113):/oracle/EPP/sapdata2[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.113):/oracle/EPP/sapdata2[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.113):/oracle/EPP/sapdata2[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.113):/oracle/EPP/sapdata2[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.113):/oracle/EPP/sapdata2[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.113):/oracle/EPP/sapdata2[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.114):/oracle/EPP/sapdata2[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.116):/oracle/EPP/sapdata2[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.117):/oracle/EPP/sapdata2[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.117):/oracle/EPP/sapdata2[fs_mount:236] IFS=.
+epprd_rg:cl_activate_fs(2.117):/oracle/EPP/sapdata2[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.117):/oracle/EPP/sapdata2[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.117):/oracle/EPP/sapdata2[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.117):/oracle/EPP/sapdata2[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.117):/oracle/EPP/sapdata2[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.117):/oracle/EPP/sapdata2[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(1.993):/oracle/EPP/sapdata2[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.8 \n\t lvname = sapdata2lv \n\t label = /oracle/EPP/sapdata2 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:45 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.117):/oracle/EPP/sapdata2[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[activate_fs_process_group:540] fs_mount /oracle/EPP/sapdata3 fsck epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:69] FS=/oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(2.118):/oracle/EPP/sapdata3[fs_mount:86] lsfs -c /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(2.119):/oracle/EPP/sapdata3[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(2.124):/oracle/EPP/sapdata3[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(2.119):/oracle/EPP/sapdata3[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata3:/dev/sapdata3lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.124):/oracle/EPP/sapdata3[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(2.124):/oracle/EPP/sapdata3[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.125):/oracle/EPP/sapdata3[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(2.119):/oracle/EPP/sapdata3[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata3:/dev/sapdata3lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.126):/oracle/EPP/sapdata3[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(2.127):/oracle/EPP/sapdata3[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(2.127):/oracle/EPP/sapdata3[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(2.128):/oracle/EPP/sapdata3[fs_mount:100] LV_name=sapdata3lv +epprd_rg:cl_activate_fs(2.128):/oracle/EPP/sapdata3[fs_mount:101] getlvcb -T -A sapdata3lv +epprd_rg:cl_activate_fs(2.129):/oracle/EPP/sapdata3[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.147):/oracle/EPP/sapdata3[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(2.129):/oracle/EPP/sapdata3[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.9 \n\t lvname = sapdata3lv \n\t label = /oracle/EPP/sapdata3 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(2.147):/oracle/EPP/sapdata3[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.147):/oracle/EPP/sapdata3[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.148):/oracle/EPP/sapdata3[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(2.129):/oracle/EPP/sapdata3[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.9 \n\t lvname = sapdata3lv \n\t label = /oracle/EPP/sapdata3 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' +epprd_rg:cl_activate_fs(2.149):/oracle/EPP/sapdata3[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.150):/oracle/EPP/sapdata3[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(2.151):/oracle/EPP/sapdata3[fs_mount:115] clodmget -q 'name = sapdata3lv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(2.155):/oracle/EPP/sapdata3[fs_mount:115] CuAt_label=/oracle/EPP/sapdata3 
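After each successful JFS2 mount, the script reduces the installed bos.rte.filesystem level to one comparable integer: 7.2.5.102 becomes 702005102 because R is zero-filled to two digits and M and F to three. A ksh sketch of that computation together with the level test from fs_mount:240-241; the concatenation into VRMF is inferred from the padded typeset declarations and the value 702005102 seen in the trace.

    typeset -li V VRMF      # plain integers
    typeset -Z2 R           # zero-filled to 2 digits
    typeset -Z3 M F         # zero-filled to 3 digits
    lslpp -lcqOr bos.rte.filesystem | cut -f3 -d: | IFS=. read V R M F
    VRMF=${V}${R}${M}${F}   # inferred: 7 02 005 102 -> 702005102
    # mountguard needs AIX 6.1 TL7 or AIX 7.1 TL1, per the traced tests
    if (( V == 6 && VRMF >= 601007000 )) || (( V == 7 && VRMF >= 701001000 ))
    then
        :   # mountguard is available at this level
    fi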
+epprd_rg:cl_activate_fs(2.155):/oracle/EPP/sapdata3[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/sapdata3 from /etc/filesystems +epprd_rg:cl_activate_fs(2.155):/oracle/EPP/sapdata3[fs_mount:119] : should match /oracle/EPP/sapdata3 from CuAt ODM and /oracle/EPP/sapdata3 from the LVCB +epprd_rg:cl_activate_fs(2.155):/oracle/EPP/sapdata3[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.155):/oracle/EPP/sapdata3[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(2.155):/oracle/EPP/sapdata3[fs_mount:123] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:cl_activate_fs(2.155):/oracle/EPP/sapdata3[fs_mount:128] [[ /oracle/EPP/sapdata3 != /oracle/EPP/sapdata3 ]] +epprd_rg:cl_activate_fs(2.155):/oracle/EPP/sapdata3[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.155):/oracle/EPP/sapdata3[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.155):/oracle/EPP/sapdata3[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.175):/oracle/EPP/sapdata3[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.175):/oracle/EPP/sapdata3[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.175):/oracle/EPP/sapdata3[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_activate_fs(2.175):/oracle/EPP/sapdata3[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.176):/oracle/EPP/sapdata3[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.201):/oracle/EPP/sapdata3[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.203):/oracle/EPP/sapdata3[amlog_trace:319] 
DATE=2023-09-30T03:25:22.534187 +epprd_rg:cl_activate_fs(2.204):/oracle/EPP/sapdata3[amlog_trace:320] echo '|2023-09-30T03:25:22.534187|INFO: Activating Filesystem|/oracle/EPP/sapdata3' +epprd_rg:cl_activate_fs(2.204):/oracle/EPP/sapdata3[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.204):/oracle/EPP/sapdata3[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.206):/oracle/EPP/sapdata3[fs_mount:162] : Try to mount filesystem /oracle/EPP/sapdata3 at Sep 30 03:25:22.000 +epprd_rg:cl_activate_fs(2.206):/oracle/EPP/sapdata3[fs_mount:163] mount /oracle/EPP/sapdata3 +epprd_rg:cl_activate_fs(2.218):/oracle/EPP/sapdata3[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.218):/oracle/EPP/sapdata3[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.218):/oracle/EPP/sapdata3[fs_mount:220] : if we are running on an AIX level that supports it +epprd_rg:cl_activate_fs(2.218):/oracle/EPP/sapdata3[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/sapdata3' +epprd_rg:cl_activate_fs(2.218):/oracle/EPP/sapdata3[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.219):/oracle/EPP/sapdata3[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.244):/oracle/EPP/sapdata3[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.247):/oracle/EPP/sapdata3[amlog_trace:319] DATE=2023-09-30T03:25:22.577327 +epprd_rg:cl_activate_fs(2.247):/oracle/EPP/sapdata3[amlog_trace:320] echo '|2023-09-30T03:25:22.577327|INFO: Activating Filesystems completed|/oracle/EPP/sapdata3' +epprd_rg:cl_activate_fs(2.247):/oracle/EPP/sapdata3[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.247):/oracle/EPP/sapdata3[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.247):/oracle/EPP/sapdata3[fs_mount:226] : Each of the V, R, M and F fields is padded to fixed length, +epprd_rg:cl_activate_fs(2.247):/oracle/EPP/sapdata3[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.247):/oracle/EPP/sapdata3[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.247):/oracle/EPP/sapdata3[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.247):/oracle/EPP/sapdata3[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.247):/oracle/EPP/sapdata3[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.247):/oracle/EPP/sapdata3[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.247):/oracle/EPP/sapdata3[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.247):/oracle/EPP/sapdata3[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.248):/oracle/EPP/sapdata3[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.249):/oracle/EPP/sapdata3[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata3[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata3[fs_mount:236] IFS=.
+epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata3[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata3[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata3[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata3[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata3[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata3[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(2.129):/oracle/EPP/sapdata3[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.9 \n\t lvname = sapdata3lv \n\t label = /oracle/EPP/sapdata3 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:43 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata3[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[activate_fs_process_group:527] PS4_LOOP=/oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[activate_fs_process_group:540] fs_mount /oracle/EPP/sapdata4 fsck epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:69] FS=/oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:81] : Here check to see if the information in /etc/filesystems for /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: 
+epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(2.251):/oracle/EPP/sapdata4[fs_mount:86] lsfs -c /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(2.252):/oracle/EPP/sapdata4[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(2.257):/oracle/EPP/sapdata4[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(2.252):/oracle/EPP/sapdata4[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata4:/dev/sapdata4lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.257):/oracle/EPP/sapdata4[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(2.257):/oracle/EPP/sapdata4[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.258):/oracle/EPP/sapdata4[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(2.252):/oracle/EPP/sapdata4[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/oracle/EPP/sapdata4:/dev/sapdata4lv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.260):/oracle/EPP/sapdata4[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(2.260):/oracle/EPP/sapdata4[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(2.260):/oracle/EPP/sapdata4[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(2.261):/oracle/EPP/sapdata4[fs_mount:100] LV_name=sapdata4lv +epprd_rg:cl_activate_fs(2.261):/oracle/EPP/sapdata4[fs_mount:101] getlvcb -T -A sapdata4lv +epprd_rg:cl_activate_fs(2.262):/oracle/EPP/sapdata4[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.280):/oracle/EPP/sapdata4[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(2.263):/oracle/EPP/sapdata4[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.10 \n\t lvname = sapdata4lv \n\t label = /oracle/EPP/sapdata4 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.280):/oracle/EPP/sapdata4[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.280):/oracle/EPP/sapdata4[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.281):/oracle/EPP/sapdata4[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(2.263):/oracle/EPP/sapdata4[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.10 \n\t lvname = sapdata4lv \n\t label = /oracle/EPP/sapdata4 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.282):/oracle/EPP/sapdata4[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.283):/oracle/EPP/sapdata4[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(2.284):/oracle/EPP/sapdata4[fs_mount:115] clodmget -q 'name = sapdata4lv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(2.288):/oracle/EPP/sapdata4[fs_mount:115] CuAt_label=/oracle/EPP/sapdata4 
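The pattern test at fs_mount:247 makes mountguard engagement a run-once operation: it acts only when the LVCB fs attribute string lacks mountguard=yes, because setting the attribute changes the volume group timestamp. Every file system in this log already carries the attribute, so the branch is skipped each time. A sketch under that reading; the chfs call is an assumption about the branch this trace never executes.

    FS=/oracle/EPP/sapdata4
    LVCB_info=$(getlvcb -T -A sapdata4lv 2>&1)  # as captured at fs_mount:101
    if [[ $LVCB_info != *mountguard=yes* ]]
    then
        chfs -a mountguard=yes $FS              # assumed: not run in this trace
    fi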
+epprd_rg:cl_activate_fs(2.288):/oracle/EPP/sapdata4[fs_mount:118] : At this point, if things are working correctly, /oracle/EPP/sapdata4 from /etc/filesystems +epprd_rg:cl_activate_fs(2.288):/oracle/EPP/sapdata4[fs_mount:119] : should match /oracle/EPP/sapdata4 from CuAt ODM and /oracle/EPP/sapdata4 from the LVCB +epprd_rg:cl_activate_fs(2.288):/oracle/EPP/sapdata4[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.288):/oracle/EPP/sapdata4[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(2.288):/oracle/EPP/sapdata4[fs_mount:123] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:cl_activate_fs(2.288):/oracle/EPP/sapdata4[fs_mount:128] [[ /oracle/EPP/sapdata4 != /oracle/EPP/sapdata4 ]] +epprd_rg:cl_activate_fs(2.288):/oracle/EPP/sapdata4[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.288):/oracle/EPP/sapdata4[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.288):/oracle/EPP/sapdata4[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.308):/oracle/EPP/sapdata4[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.308):/oracle/EPP/sapdata4[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.308):/oracle/EPP/sapdata4[fs_mount:160] amlog_trace '' 'Activating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_activate_fs(2.308):/oracle/EPP/sapdata4[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.309):/oracle/EPP/sapdata4[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.333):/oracle/EPP/sapdata4[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.336):/oracle/EPP/sapdata4[amlog_trace:319] 
DATE=2023-09-30T03:25:22.666753 +epprd_rg:cl_activate_fs(2.336):/oracle/EPP/sapdata4[amlog_trace:320] echo '|2023-09-30T03:25:22.666753|INFO: Activating Filesystem|/oracle/EPP/sapdata4' +epprd_rg:cl_activate_fs(2.336):/oracle/EPP/sapdata4[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.336):/oracle/EPP/sapdata4[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.339):/oracle/EPP/sapdata4[fs_mount:162] : Try to mount filesystem /oracle/EPP/sapdata4 at Sep 30 03:25:22.000 +epprd_rg:cl_activate_fs(2.339):/oracle/EPP/sapdata4[fs_mount:163] mount /oracle/EPP/sapdata4 +epprd_rg:cl_activate_fs(2.351):/oracle/EPP/sapdata4[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.351):/oracle/EPP/sapdata4[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.351):/oracle/EPP/sapdata4[fs_mount:220] : if we are running on an AIX level that supports it +epprd_rg:cl_activate_fs(2.351):/oracle/EPP/sapdata4[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/oracle/EPP/sapdata4' +epprd_rg:cl_activate_fs(2.351):/oracle/EPP/sapdata4[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.352):/oracle/EPP/sapdata4[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.376):/oracle/EPP/sapdata4[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.379):/oracle/EPP/sapdata4[amlog_trace:319] DATE=2023-09-30T03:25:22.709579 +epprd_rg:cl_activate_fs(2.379):/oracle/EPP/sapdata4[amlog_trace:320] echo '|2023-09-30T03:25:22.709579|INFO: Activating Filesystems completed|/oracle/EPP/sapdata4' +epprd_rg:cl_activate_fs(2.379):/oracle/EPP/sapdata4[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.379):/oracle/EPP/sapdata4[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.379):/oracle/EPP/sapdata4[fs_mount:226] : Each of the V, R, M and F fields is padded to fixed length, +epprd_rg:cl_activate_fs(2.379):/oracle/EPP/sapdata4[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.379):/oracle/EPP/sapdata4[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.379):/oracle/EPP/sapdata4[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.379):/oracle/EPP/sapdata4[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.379):/oracle/EPP/sapdata4[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.379):/oracle/EPP/sapdata4[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.379):/oracle/EPP/sapdata4[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.379):/oracle/EPP/sapdata4[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.380):/oracle/EPP/sapdata4[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.381):/oracle/EPP/sapdata4[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.383):/oracle/EPP/sapdata4[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.383):/oracle/EPP/sapdata4[fs_mount:236] IFS=.
+epprd_rg:cl_activate_fs(2.383):/oracle/EPP/sapdata4[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.383):/oracle/EPP/sapdata4[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.383):/oracle/EPP/sapdata4[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.383):/oracle/EPP/sapdata4[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.383):/oracle/EPP/sapdata4[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.383):/oracle/EPP/sapdata4[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(2.263):/oracle/EPP/sapdata4[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.10 \n\t lvname = sapdata4lv \n\t label = /oracle/EPP/sapdata4 \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:46 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.383):/oracle/EPP/sapdata4[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.383):/sapmnt[activate_fs_process_group:527] PS4_LOOP=/sapmnt +epprd_rg:cl_activate_fs(2.383):/sapmnt[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(2.383):/sapmnt[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(2.383):/sapmnt[activate_fs_process_group:540] fs_mount /sapmnt fsck epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:69] FS=/sapmnt +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:69] typeset FS +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:81] : Here check to see if the information in /etc/filesystems for /sapmnt +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(2.383):/sapmnt[fs_mount:84] : point in /etc/filesystems. 
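
The fs_mount preamble above cross-checks three copies of the same fact: the mount point recorded in /etc/filesystems (via lsfs), the label stored in the on-disk LVCB (via getlvcb), and the label attribute in the CuAt ODM. A hedged sketch of that three-way check as a standalone function, using stock odmget in place of PowerHA's clodmget wrapper (check_fs_label is a hypothetical name):

    # Succeeds when /etc/filesystems, the LVCB and CuAt all agree on the
    # mount point of the LV behind filesystem $1.
    check_fs_label() {
        typeset fs=$1 lv lvcb_label cuat_label
        lv=$(lsfs -c "$fs" | tail -1 | cut -d: -f2)    # e.g. /dev/sapmntlv
        lv=${lv##*/}                                   # -> sapmntlv
        lvcb_label=$(getlvcb -T -A "$lv" 2>/dev/null |
            grep -w 'label =' | awk '{print $3}')
        cuat_label=$(odmget -q "name = $lv and attribute = label" CuAt |
            sed -n 's/.*value = "\(.*\)".*/\1/p')
        [[ $fs == "$lvcb_label" && $fs == "$cuat_label" ]]
    }

As the fs_mount:118-121 comments say, a mismatch here is only noted, not repaired, because clvaryonvg already made its best recovery attempt.
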
+epprd_rg:cl_activate_fs(2.384):/sapmnt[fs_mount:86] lsfs -c /sapmnt +epprd_rg:cl_activate_fs(2.385):/sapmnt[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(2.390):/sapmnt[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(2.385):/sapmnt[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/sapmnt:/dev/sapmntlv:jfs2:::20971520:rw:no:no' +epprd_rg:cl_activate_fs(2.390):/sapmnt[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(2.390):/sapmnt[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.391):/sapmnt[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(2.385):/sapmnt[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/sapmnt:/dev/sapmntlv:jfs2:::20971520:rw:no:no' +epprd_rg:cl_activate_fs(2.392):/sapmnt[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(2.392):/sapmnt[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(2.393):/sapmnt[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(2.394):/sapmnt[fs_mount:100] LV_name=sapmntlv +epprd_rg:cl_activate_fs(2.394):/sapmnt[fs_mount:101] getlvcb -T -A sapmntlv +epprd_rg:cl_activate_fs(2.395):/sapmnt[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.413):/sapmnt[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(2.395):/sapmnt[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.3 \n\t lvname = sapmntlv \n\t label = /sapmnt \n\t machine id = 44AF14B00 \n\t number lps = 20 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:41 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.413):/sapmnt[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.413):/sapmnt[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.414):/sapmnt[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(2.395):/sapmnt[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.3 \n\t lvname = sapmntlv \n\t label = /sapmnt \n\t machine id = 44AF14B00 \n\t number lps = 20 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:41 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.415):/sapmnt[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.416):/sapmnt[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(2.417):/sapmnt[fs_mount:115] clodmget -q 'name = sapmntlv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(2.421):/sapmnt[fs_mount:115] CuAt_label=/sapmnt +epprd_rg:cl_activate_fs(2.421):/sapmnt[fs_mount:118] : At this point, if things are working correctly, /sapmnt from /etc/filesystems +epprd_rg:cl_activate_fs(2.421):/sapmnt[fs_mount:119] : should match /sapmnt from CuAt ODM and /sapmnt from the LVCB +epprd_rg:cl_activate_fs(2.421):/sapmnt[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.421):/sapmnt[fs_mount:121] : were done in clvaryonvg. 
+epprd_rg:cl_activate_fs(2.421):/sapmnt[fs_mount:123] [[ /sapmnt != /sapmnt ]] +epprd_rg:cl_activate_fs(2.421):/sapmnt[fs_mount:128] [[ /sapmnt != /sapmnt ]] +epprd_rg:cl_activate_fs(2.421):/sapmnt[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.421):/sapmnt[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.421):/sapmnt[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . /usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.441):/sapmnt[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.441):/sapmnt[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.441):/sapmnt[fs_mount:160] amlog_trace '' 'Activating Filesystem|/sapmnt' +epprd_rg:cl_activate_fs(2.441):/sapmnt[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.442):/sapmnt[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.466):/sapmnt[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.469):/sapmnt[amlog_trace:319] DATE=2023-09-30T03:25:22.799603 +epprd_rg:cl_activate_fs(2.469):/sapmnt[amlog_trace:320] echo '|2023-09-30T03:25:22.799603|INFO: Activating Filesystem|/sapmnt' +epprd_rg:cl_activate_fs(2.469):/sapmnt[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.469):/sapmnt[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.472):/sapmnt[fs_mount:162] : Try to mount filesystem /sapmnt at Sep 30 03:25:22.000 +epprd_rg:cl_activate_fs(2.472):/sapmnt[fs_mount:163] mount /sapmnt +epprd_rg:cl_activate_fs(2.484):/sapmnt[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.484):/sapmnt[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.484):/sapmnt[fs_mount:220] : if we are 
running on an AIX level that supports it +epprd_rg:cl_activate_fs(2.484):/sapmnt[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/sapmnt' +epprd_rg:cl_activate_fs(2.484):/sapmnt[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.485):/sapmnt[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.509):/sapmnt[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.512):/sapmnt[amlog_trace:319] DATE=2023-09-30T03:25:22.842765 +epprd_rg:cl_activate_fs(2.512):/sapmnt[amlog_trace:320] echo '|2023-09-30T03:25:22.842765|INFO: Activating Filesystems completed|/sapmnt' +epprd_rg:cl_activate_fs(2.512):/sapmnt[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.512):/sapmnt[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.512):/sapmnt[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(2.512):/sapmnt[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.512):/sapmnt[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.512):/sapmnt[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.512):/sapmnt[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.512):/sapmnt[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.512):/sapmnt[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.512):/sapmnt[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.512):/sapmnt[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.513):/sapmnt[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.514):/sapmnt[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.516):/sapmnt[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.516):/sapmnt[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(2.516):/sapmnt[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.516):/sapmnt[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.516):/sapmnt[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.516):/sapmnt[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.516):/sapmnt[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.516):/sapmnt[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(2.395):/sapmnt[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.3 \n\t lvname = sapmntlv \n\t label = /sapmnt \n\t machine id = 44AF14B00 \n\t number lps = 20 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:41 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.516):/sapmnt[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.516):/usr/sap[activate_fs_process_group:527] PS4_LOOP=/usr/sap +epprd_rg:cl_activate_fs(2.516):/usr/sap[activate_fs_process_group:528] [[ sequential == parallel ]] +epprd_rg:cl_activate_fs(2.516):/usr/sap[activate_fs_process_group:538] : Call fs_mount function in foreground for serial recovery +epprd_rg:cl_activate_fs(2.516):/usr/sap[activate_fs_process_group:540] fs_mount /usr/sap fsck epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(2.516):/usr/sap[fs_mount:69] FS=/usr/sap +epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:69] typeset FS
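
The mountguard stanza that each mount reaches (fs_mount:244-247) only sets the flag when the LVCB string does not already contain mountguard=yes, because flipping it rewrites the VG timestamp. A sketch of that guard; chfs -a mountguard=yes is the stock AIX way to set the attribute, and the actual PowerHA enable path is not visible in this trace since every LV here already has it:

    fs=/sapmnt        # example values taken from the trace
    lv=sapmntlv
    # mountguard makes a concurrent mount attempt from a second node fail,
    # protecting a JFS2 filesystem against double mounts.
    if [[ $(getlvcb -T -A "$lv" 2>/dev/null) != *mountguard=yes* ]]; then
        chfs -a mountguard=yes "$fs"
    fi
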
+epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:70] TOOL=fsck +epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:70] typeset TOOL +epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:71] TMP_FILENAME=epprd_rg_activate_fs.tmp21430736 +epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:71] typeset TMP_FILENAME +epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:72] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:72] typeset WPAR_ROOT +epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:73] MOUNT_ARGS='' +epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:73] typeset MOUNT_ARGS +epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:74] STATUS=0 +epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:74] typeset -i STATUS +epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:75] typeset LVCB_info +epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:76] typeset FS_info +epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:77] typeset LV_name +epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:78] RC=0 +epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:78] typeset -i RC +epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:81] : Here check to see if the information in /etc/filesystems for /usr/sap +epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:82] : is consistent with what is in CuAt ODM for the logical volume: +epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:83] : the label field for the logical volume should match the mount +epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:84] : point in /etc/filesystems. +epprd_rg:cl_activate_fs(2.517):/usr/sap[fs_mount:86] lsfs -c /usr/sap +epprd_rg:cl_activate_fs(2.518):/usr/sap[fs_mount:86] 2>& 1 +epprd_rg:cl_activate_fs(2.523):/usr/sap[fs_mount:86] FS_info=$'+epprd_rg:cl_activate_fs(2.518):/usr/sap[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/usr/sap:/dev/saplv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.523):/usr/sap[fs_mount:87] RC=0 +epprd_rg:cl_activate_fs(2.523):/usr/sap[fs_mount:88] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.524):/usr/sap[fs_mount:99] print -- $'+epprd_rg:cl_activate_fs(2.518):/usr/sap[fs_mount:86] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/usr/sap:/dev/saplv:jfs2:::104857600:rw:no:no' +epprd_rg:cl_activate_fs(2.525):/usr/sap[fs_mount:99] tail -1 +epprd_rg:cl_activate_fs(2.526):/usr/sap[fs_mount:99] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs(2.526):/usr/sap[fs_mount:99] IFS=: +epprd_rg:cl_activate_fs(2.527):/usr/sap[fs_mount:100] LV_name=saplv +epprd_rg:cl_activate_fs(2.527):/usr/sap[fs_mount:101] getlvcb -T -A saplv +epprd_rg:cl_activate_fs(2.528):/usr/sap[fs_mount:101] 2>& 1 +epprd_rg:cl_activate_fs(2.546):/usr/sap[fs_mount:101] LVCB_info=$'+epprd_rg:cl_activate_fs(2.528):/usr/sap[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.2 \n\t lvname = saplv \n\t label = /usr/sap \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:37 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.546):/usr/sap[fs_mount:102] RC=0 +epprd_rg:cl_activate_fs(2.546):/usr/sap[fs_mount:103] (( 0 != 0 )) +epprd_rg:cl_activate_fs(2.547):/usr/sap[fs_mount:114] print -- $'+epprd_rg:cl_activate_fs(2.528):/usr/sap[fs_mount:101] 
LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.2 \n\t lvname = saplv \n\t label = /usr/sap \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:37 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' +epprd_rg:cl_activate_fs(2.548):/usr/sap[fs_mount:114] grep -w 'label =' +epprd_rg:cl_activate_fs(2.549):/usr/sap[fs_mount:114] read skip skip LVCB_label +epprd_rg:cl_activate_fs(2.550):/usr/sap[fs_mount:115] clodmget -q 'name = saplv and attribute = label' -f value -n CuAt +epprd_rg:cl_activate_fs(2.554):/usr/sap[fs_mount:115] CuAt_label=/usr/sap +epprd_rg:cl_activate_fs(2.554):/usr/sap[fs_mount:118] : At this point, if things are working correctly, /usr/sap from /etc/filesystems +epprd_rg:cl_activate_fs(2.554):/usr/sap[fs_mount:119] : should match /usr/sap from CuAt ODM and /usr/sap from the LVCB +epprd_rg:cl_activate_fs(2.554):/usr/sap[fs_mount:120] : on disk. No recovery is done at this point, because best efforts at recovery +epprd_rg:cl_activate_fs(2.554):/usr/sap[fs_mount:121] : were done in clvaryonvg. +epprd_rg:cl_activate_fs(2.554):/usr/sap[fs_mount:123] [[ /usr/sap != /usr/sap ]] +epprd_rg:cl_activate_fs(2.554):/usr/sap[fs_mount:128] [[ /usr/sap != /usr/sap ]] +epprd_rg:cl_activate_fs(2.554):/usr/sap[fs_mount:133] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.554):/usr/sap[fs_mount:143] [[ -n epprd_rg ]] +epprd_rg:cl_activate_fs(2.554):/usr/sap[fs_mount:143] clwparroot epprd_rg +epprd_rg:clwparroot[42] [[ high == high ]] +epprd_rg:clwparroot[42] version=1.1 +epprd_rg:clwparroot[44] . /usr/es/sbin/cluster/wpar/wpar_utils +epprd_rg:clwparroot[11] . 
/usr/es/sbin/cluster/wpar/wpar_common_funcs +epprd_rg:clwparroot[26] [[ high == high ]] +epprd_rg:clwparroot[26] set -x +epprd_rg:clwparroot[27] [[ high == high ]] +epprd_rg:clwparroot[27] version='1.6 $Source: 61haes_r711 43haes/usr/sbin/cluster/wpar/wpar_common_funcs.sh 1$' +epprd_rg:clwparroot[29] PATH=/usr/bin:/usr/sbin:/usr/es/sbin/cluster:/usr/es/sbin/cluster/utilities:/usr/es/sbin/cluster/events:/usr/es/sbin/cluster/events/utils:/usr/es/sbin/cluster/events/cmd:/usr/es/sbin/cluster/diag:/usr/es/sbin/cluster/etc:/usr/es/sbin/cluster/sbin:/usr/es/sbin/cluster/cspoc:/usr/es/sbin/cluster/conversion:/usr/es/sbin/cluster/glvm/utils:/usr/es/sbin/cluster/wpar:/usr/es/sbin/cluster/sa/sbin:/usr/lib/cluster:/opt/freeware/bin:/usr/es/sbin/cluster/clanalyze:/etc:/usr/ucb:/usr/bin/X11:/sbin:/bin:/usr/bin:/usr/sbin +epprd_rg:clwparroot[30] export PATH +epprd_rg:clwparroot[32] typeset usageErr invalArgErr internalErr +epprd_rg:clwparroot[34] usageErr=10 +epprd_rg:clwparroot[35] invalArgErr=11 +epprd_rg:clwparroot[36] internalErr=12 +epprd_rg:clwparroot[46] rgName=epprd_rg +epprd_rg:clwparroot[49] uname +epprd_rg:clwparroot[49] OSNAME=AIX +epprd_rg:clwparroot[51] [[ AIX == *AIX* ]] +epprd_rg:clwparroot[52] lslpp -l bos.wpars +epprd_rg:clwparroot[52] 1> /dev/null 2>& 1 +epprd_rg:clwparroot[54] loadWparName epprd_rg +epprd_rg:clwparroot[loadWparName:1484] [[ 1 =~ 1 ]] +epprd_rg:clwparroot[loadWparName:1490] clodmget -q 'name = WPAR_NAME' -f value -n HACMPresource +epprd_rg:clwparroot[loadWparName:1490] [[ -z '' ]] +epprd_rg:clwparroot[loadWparName:1490] return 0 +epprd_rg:clwparroot[54] wparName='' +epprd_rg:clwparroot[55] (( 0 != 0 )) +epprd_rg:clwparroot[55] [[ -z '' ]] +epprd_rg:clwparroot[57] exit 0 +epprd_rg:cl_activate_fs(2.574):/usr/sap[fs_mount:143] WPAR_ROOT='' +epprd_rg:cl_activate_fs(2.574):/usr/sap[fs_mount:144] [[ -n '' ]] +epprd_rg:cl_activate_fs(2.574):/usr/sap[fs_mount:160] amlog_trace '' 'Activating Filesystem|/usr/sap' +epprd_rg:cl_activate_fs(2.574):/usr/sap[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.575):/usr/sap[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.599):/usr/sap[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.602):/usr/sap[amlog_trace:319] DATE=2023-09-30T03:25:22.932692 +epprd_rg:cl_activate_fs(2.602):/usr/sap[amlog_trace:320] echo '|2023-09-30T03:25:22.932692|INFO: Activating Filesystem|/usr/sap' +epprd_rg:cl_activate_fs(2.602):/usr/sap[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.602):/usr/sap[fs_mount:162] date '+%h %d %H:%M:%S.000' +epprd_rg:cl_activate_fs(2.605):/usr/sap[fs_mount:162] : Try to mount filesystem /usr/sap at Sep 30 03:25:22.000 +epprd_rg:cl_activate_fs(2.605):/usr/sap[fs_mount:163] mount /usr/sap +epprd_rg:cl_activate_fs(2.617):/usr/sap[fs_mount:209] (( 0 == 1 )) +epprd_rg:cl_activate_fs(2.617):/usr/sap[fs_mount:219] : On successful mount of a JFS2 file system, engage mountguard, +epprd_rg:cl_activate_fs(2.617):/usr/sap[fs_mount:220] : if we are running on an AIX level that supports it +epprd_rg:cl_activate_fs(2.617):/usr/sap[fs_mount:223] amlog_trace '' 'Activating Filesystems completed|/usr/sap' +epprd_rg:cl_activate_fs(2.617):/usr/sap[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_fs(2.618):/usr/sap[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_fs(2.643):/usr/sap[amlog_trace:319] cltime +epprd_rg:cl_activate_fs(2.645):/usr/sap[amlog_trace:319] DATE=2023-09-30T03:25:22.976104
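
Each amlog_trace call in this sequence, including the one completing just below, cycles clavailability.log if needed, stamps the time with cltime, and appends one pipe-delimited record, which makes the availability log easy to parse afterwards. A sketch of the record format (the grep is only an example query, not part of the event scripts):

    ts=$(cltime)      # e.g. 2023-09-30T03:25:22.976104
    print -- "|$ts|INFO: Activating Filesystem|/usr/sap" >> /var/hacmp/availability/clavailability.log

    # Pull every activation record for /usr/sap back out of the log:
    grep 'Activating Filesystem|/usr/sap' /var/hacmp/availability/clavailability.log
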
+epprd_rg:cl_activate_fs(2.645):/usr/sap[amlog_trace:320] echo '|2023-09-30T03:25:22.976104|INFO: Activating Filesystems completed|/usr/sap' +epprd_rg:cl_activate_fs(2.645):/usr/sap[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_fs(2.646):/usr/sap[fs_mount:224] [[ jfs2 == jfs2 ]] +epprd_rg:cl_activate_fs(2.646):/usr/sap[fs_mount:226] : Each of the V, R, M and F fields are padded to fixed length, +epprd_rg:cl_activate_fs(2.646):/usr/sap[fs_mount:227] : to allow reliable comparisons. E.g., maximum VRMF is +epprd_rg:cl_activate_fs(2.646):/usr/sap[fs_mount:228] : 99.99.999.999 +epprd_rg:cl_activate_fs(2.646):/usr/sap[fs_mount:230] typeset -li V R M F +epprd_rg:cl_activate_fs(2.646):/usr/sap[fs_mount:231] typeset -Z2 R +epprd_rg:cl_activate_fs(2.646):/usr/sap[fs_mount:232] typeset -Z3 M +epprd_rg:cl_activate_fs(2.646):/usr/sap[fs_mount:233] typeset -Z3 F +epprd_rg:cl_activate_fs(2.646):/usr/sap[fs_mount:234] VRMF=0 +epprd_rg:cl_activate_fs(2.646):/usr/sap[fs_mount:234] typeset -li VRMF +epprd_rg:cl_activate_fs(2.647):/usr/sap[fs_mount:236] lslpp -lcqOr bos.rte.filesystem +epprd_rg:cl_activate_fs(2.648):/usr/sap[fs_mount:236] cut -f3 -d: +epprd_rg:cl_activate_fs(2.649):/usr/sap[fs_mount:236] read V R M F +epprd_rg:cl_activate_fs(2.649):/usr/sap[fs_mount:236] IFS=. +epprd_rg:cl_activate_fs(2.650):/usr/sap[fs_mount:237] VRMF=702005102 +epprd_rg:cl_activate_fs(2.650):/usr/sap[fs_mount:240] (( 7 == 6 && 702005102 >= 601007000 )) +epprd_rg:cl_activate_fs(2.650):/usr/sap[fs_mount:241] (( 7 == 7 && 702005102 >= 701001000 )) +epprd_rg:cl_activate_fs(2.650):/usr/sap[fs_mount:244] : Tell JFS2 to try to protect against double mounts via fs mountguard +epprd_rg:cl_activate_fs(2.650):/usr/sap[fs_mount:245] : the setting would cause VG timestamp change so run once +epprd_rg:cl_activate_fs(2.650):/usr/sap[fs_mount:247] [[ $'+epprd_rg:cl_activate_fs(2.528):/usr/sap[fs_mount:101] LC_ALL=C\n\t AIX LVCB\n\t intrapolicy = c \n\t copies = 1 \n\t interpolicy = x \n\t lvid = 00c44af100004b00000001851e9dc053.2 \n\t lvname = saplv \n\t label = /usr/sap \n\t machine id = 44AF14B00 \n\t number lps = 100 \n\t relocatable = y \n\t strict = y \n\t stripe width = 0 \n\t stripe size in exponent = 0 \n\t type = jfs2 \n\t upperbound = 1024 \n\t fs = vfs=jfs2:log=/dev/epprdaloglv:account=false:mountguard=yes \n\t time created = Sat Dec 17 14:46:37 2022\n \t time modified = Sat Jan 28 17:10:44 2023\n ' != *mountguard=yes* ]] +epprd_rg:cl_activate_fs(2.650):/usr/sap[fs_mount:255] return 0 +epprd_rg:cl_activate_fs(2.650):/usr/sap[activate_fs_process_group:543] unset PS4_LOOP PS4_TIMER +epprd_rg:cl_activate_fs[activate_fs_process_group:546] : Allow any background mount operations to finish +epprd_rg:cl_activate_fs[activate_fs_process_group:548] wait +epprd_rg:cl_activate_fs[activate_fs_process_group:550] : Read cluster level Preferred read option +epprd_rg:cl_activate_fs[activate_fs_process_group:552] clodmget -n -f lvm_preferred_read HACMPcluster +epprd_rg:cl_activate_fs[activate_fs_process_group:552] cluster_pref_read=roundrobin +epprd_rg:cl_activate_fs[activate_fs_process_group:555] : Looping all file systems to update preferred read option of each lv.
+epprd_rg:cl_activate_fs[activate_fs_process_group:556] : By referring VG level preferred_read option or cluster level Preferred read option +epprd_rg:cl_activate_fs[activate_fs_process_group:560] lsfs -c /board_org +epprd_rg:cl_activate_fs[activate_fs_process_group:560] 2>& 1 +epprd_rg:cl_activate_fs[activate_fs_process_group:560] FS_info=$'+epprd_rg:cl_activate_fs[activate_fs_process_group:560] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs[activate_fs_process_group:561] RC=0 +epprd_rg:cl_activate_fs[activate_fs_process_group:562] (( 0 != 0 )) +epprd_rg:cl_activate_fs[activate_fs_process_group:574] print -- $'+epprd_rg:cl_activate_fs[activate_fs_process_group:560] LC_ALL=C\n#MountPoint:Device:Vfs:Nodename:Type:Size:Options:AutoMount:Acct\n/board_org:/dev/boardlv:jfs2:::10485760:rw:no:no' +epprd_rg:cl_activate_fs[activate_fs_process_group:574] tail -1 +epprd_rg:cl_activate_fs[activate_fs_process_group:574] read skip LV_dev_name vfs_type rest +epprd_rg:cl_activate_fs[activate_fs_process_group:574] IFS=: +epprd_rg:cl_activate_fs[activate_fs_process_group:575] LV_name=boardlv +epprd_rg:cl_activate_fs[activate_fs_process_group:577] grep -w 'VOLUME GROUP' +epprd_rg:cl_activate_fs[activate_fs_process_group:577] lslv -L boardlv +epprd_rg:cl_activate_fs[activate_fs_process_group:577] LC_ALL=C +epprd_rg:cl_activate_fs[activate_fs_process_group:577] volume_group='LOGICAL VOLUME: boardlv VOLUME GROUP: datavg' +epprd_rg:cl_activate_fs[activate_fs_process_group:578] volume_group=datavg +epprd_rg:cl_activate_fs[activate_fs_process_group:579] volume_group=datavg +epprd_rg:cl_activate_fs[activate_fs_process_group:581] clodmget -n -f group -q name='VOLUME_GROUP and value=datavg' HACMPresource +epprd_rg:cl_activate_fs[activate_fs_process_group:581] RGName=epprd_rg +epprd_rg:cl_activate_fs[activate_fs_process_group:584] : Get the Preferred storage read option for this VG and perform chlv command +epprd_rg:cl_activate_fs[activate_fs_process_group:586] clodmget -n -f value -q name='LVM_PREFERRED_READ and volume_group=datavg' HACMPvolumegroup +epprd_rg:cl_activate_fs[activate_fs_process_group:586] 2> /dev/null +epprd_rg:cl_activate_fs[activate_fs_process_group:586] PreferredReadOption='' +epprd_rg:cl_activate_fs[activate_fs_process_group:587] [[ -z '' ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:589] PreferredReadOption=roundrobin +epprd_rg:cl_activate_fs[activate_fs_process_group:590] [[ -z roundrobin ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:590] [[ roundrobin == roundrobin ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:593] : Both VG level and Cluster level LVM Preferred Read option chosen as roundrobin. 
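
With both the VG-level and cluster-level tunables resolving to roundrobin, the loop applies the policy with the chlv call just below. In chlv terms a preferred-read copy of 0 means no preference, that is, reads round-robin across the mirror copies, while a non-zero copy number would pin reads to one mirror copy. A sketch of just the mapping this trace exercises:

    PreferredReadOption=roundrobin    # from HACMPvolumegroup, else HACMPcluster
    LV_name=boardlv
    if [[ $PreferredReadOption == roundrobin ]]; then
        chlv -R 0 "$LV_name"          # copy 0 = no preferred mirror copy
    fi
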
+epprd_rg:cl_activate_fs[activate_fs_process_group:595] chlv -R 0 boardlv +epprd_rg:cl_activate_fs[activate_fs_process_group:596] (( 0 != 0 )) +epprd_rg:cl_activate_fs[activate_fs_process_group:600] break +epprd_rg:cl_activate_fs[activate_fs_process_group:670] : Update the resource manager with the state of the operation +epprd_rg:cl_activate_fs[activate_fs_process_group:672] ALLNOERROR=All_non_error_filesystems +epprd_rg:cl_activate_fs[activate_fs_process_group:673] cl_RMupdate resource_up All_non_error_filesystems cl_activate_fs 2023-09-30T03:25:23.281883 2023-09-30T03:25:23.286253 +epprd_rg:cl_activate_fs[activate_fs_process_group:676] : And harvest any status from the background mount operations +epprd_rg:cl_activate_fs[activate_fs_process_group:678] [[ -f /tmp/epprd_rg_activate_fs.tmp21430736 ]] +epprd_rg:cl_activate_fs[activate_fs_process_group:688] return 0 +epprd_rg:cl_activate_fs[activate_fs_process_resources:767] RC=0 +epprd_rg:cl_activate_fs[activate_fs_process_resources:768] (( 0 != 0 && 0 == 0 )) +epprd_rg:cl_activate_fs[activate_fs_process_resources:772] RG_FILE_SYSTEMS='' +epprd_rg:cl_activate_fs[activate_fs_process_resources:776] return 0 +epprd_rg:cl_activate_fs[851] STATUS=0 +epprd_rg:cl_activate_fs[873] return 0 +epprd_rg:process_resources(8.463)[process_file_systems:2648] RC=0 +epprd_rg:process_resources(8.463)[process_file_systems:2649] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources(8.463)[process_file_systems:2661] (( 0 != 0 )) +epprd_rg:process_resources(8.463)[process_file_systems:2687] return 0 +epprd_rg:process_resources(8.463)[3483] RC=0 +epprd_rg:process_resources(8.463)[3485] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources(8.463)[3324] true +epprd_rg:process_resources(8.463)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(8.463)[3328] set -a +epprd_rg:process_resources(8.463)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-09-30T03:25:23.299892 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(8.482)[3329] eval JOB_TYPE=SYNC_VGS ACTION=ACQUIRE VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' +epprd_rg:process_resources(8.483)[1] JOB_TYPE=SYNC_VGS +epprd_rg:process_resources(8.483)[1] ACTION=ACQUIRE +epprd_rg:process_resources(8.483)[1] VOLUME_GROUPS=datavg +epprd_rg:process_resources(8.483)[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources(8.483)[3330] RC=0 +epprd_rg:process_resources(8.483)[3331] set +a +epprd_rg:process_resources(8.483)[3333] (( 0 != 0 )) +epprd_rg:process_resources(8.483)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(8.483)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(8.483)[3343] export GROUPNAME +epprd_rg:process_resources(8.483)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(8.483)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(8.483)[3360] [[ SYNC_VGS == RELEASE ]] +epprd_rg:process_resources(8.483)[3360] [[ SYNC_VGS == ONLINE ]] +epprd_rg:process_resources(8.483)[3474] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources(8.483)[3476] sync_volume_groups +epprd_rg:process_resources(8.483)[sync_volume_groups:2699] PS4_FUNC=sync_volume_groups +epprd_rg:process_resources(8.483)[sync_volume_groups:2699] typeset PS4_FUNC +epprd_rg:process_resources(8.483)[sync_volume_groups:2700] [[ high == high ]] 
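
sync_volume_groups, entered just above, only syncs volume groups that are both listed for the resource group and currently varied on; the next stretch of trace computes that intersection with lsvg, sort and comm. The same set intersection as a standalone sketch:

    VOLUME_GROUPS='datavg'
    lsvg -L -o 2> /tmp/lsvg.err | sort > /tmp/lsvg.out.$$
    # comm -12 keeps only lines common to the varied-on list and the RG list.
    echo $VOLUME_GROUPS | tr ' ' '\n' | sort | comm -12 /tmp/lsvg.out.$$ -
    rm -f /tmp/lsvg.out.$$ /tmp/lsvg.err
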
+epprd_rg:process_resources(8.483)[sync_volume_groups:2700] set -x +epprd_rg:process_resources(8.483)[sync_volume_groups:2701] STAT=0 +epprd_rg:process_resources(8.483)[sync_volume_groups:2704] export GROUPNAME +epprd_rg:process_resources(8.484)[sync_volume_groups:2706] get_list_head datavg +epprd_rg:process_resources(8.484)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(8.484)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(8.484)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(8.484)[get_list_head:60] set -x +epprd_rg:process_resources(8.485)[get_list_head:61] echo datavg +epprd_rg:process_resources(8.485)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(8.485)[get_list_head:61] IFS=: +epprd_rg:process_resources(8.487)[get_list_head:62] echo datavg +epprd_rg:process_resources(8.487)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(8.484)[sync_volume_groups:2706] read LIST_OF_VOLUME_GROUPS_FOR_RG +epprd_rg:process_resources(8.490)[sync_volume_groups:2707] get_list_tail datavg +epprd_rg:process_resources(8.491)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(8.491)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(8.491)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(8.491)[get_list_tail:68] set -x +epprd_rg:process_resources(8.492)[get_list_tail:69] echo datavg +epprd_rg:process_resources(8.492)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(8.493)[get_list_tail:69] IFS=: +epprd_rg:process_resources(8.493)[get_list_tail:70] echo +epprd_rg:process_resources(8.492)[sync_volume_groups:2707] read VOLUME_GROUPS +epprd_rg:process_resources(8.494)[sync_volume_groups:2710] : Sync the active volume groups +epprd_rg:process_resources(8.495)[sync_volume_groups:2712] lsvg -L -o +epprd_rg:process_resources(8.495)[sync_volume_groups:2712] 2> /tmp/lsvg.err +epprd_rg:process_resources(8.495)[sync_volume_groups:2712] sort +epprd_rg:process_resources(8.497)[sync_volume_groups:2712] 1> /tmp/lsvg.out.19923274 +epprd_rg:process_resources(8.504)[sync_volume_groups:2713] echo datavg +epprd_rg:process_resources(8.505)[sync_volume_groups:2713] tr ' ' '\n' +epprd_rg:process_resources(8.506)[sync_volume_groups:2714] sort +epprd_rg:process_resources(8.507)[sync_volume_groups:2714] comm -12 /tmp/lsvg.out.19923274 - +epprd_rg:process_resources(8.512)[sync_volume_groups:2718] [[ -s /tmp/lsvg.err ]] +epprd_rg:process_resources(8.513)[sync_volume_groups:2723] rm -f /tmp/lsvg.out.19923274 /tmp/lsvg.err +epprd_rg:process_resources(8.513)[sync_volume_groups:2716] cl_sync_vgs datavg +epprd_rg:process_resources(8.517)[sync_volume_groups:2732] unset AM_SYNC_CALLED_BY +epprd_rg:process_resources(8.517)[sync_volume_groups:2734] return 0 +epprd_rg:process_resources(8.517)[3324] true +epprd_rg:process_resources(8.517)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(8.517)[3328] set -a +epprd_rg:process_resources(8.517)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa +epprd_rg:cl_sync_vgs[303] version=1.24.1.4 +epprd_rg:cl_sync_vgs[306] (( 1 == 0 )) +epprd_rg:cl_sync_vgs[312] : syncing 4 stale PPs at a time seems to be a win most of the time, but +epprd_rg:cl_sync_vgs[313] : we honor the NUM_PARALLEL_LPS value from /etc/environment, as does +epprd_rg:cl_sync_vgs[314] : 
syncvg. +epprd_rg:cl_sync_vgs[316] syncflag='' +epprd_rg:cl_sync_vgs[316] export syncflag +epprd_rg:cl_sync_vgs[317] PS4_LOOP='' +epprd_rg:cl_sync_vgs[317] export PS4_LOOP +epprd_rg:cl_sync_vgs[318] typeset -i npl +epprd_rg:cl_sync_vgs[319] grep -q ^NUM_PARALLEL_LPS= /etc/environment 2023-09-30T03:25:23.353871 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:cl_sync_vgs[321] syncflag=-P4 +epprd_rg:cl_sync_vgs[328] echo 'NOTE: While the sync is going on, volume group can be used' NOTE: While the sync is going on, volume group can be used +epprd_rg:cl_sync_vgs[331] : For GLVM volume groups, read PARALLEL LPS count from HACMPresource if it is set from GUI, +epprd_rg:cl_sync_vgs[332] : else, read from environment variables, if it is not set use 32 as default value. +epprd_rg:cl_sync_vgs[334] clodmget -q name='GMVG_REP_RESOURCE and value=datavg' -f group HACMPresource +epprd_rg:process_resources(8.531)[3329] eval JOB_TYPE=EXPORT_FILESYSTEMS ACTION=ACQUIRE EXPORT_FILE_SYSTEMS='"/board_org,/sapmnt/EPP"' EXPORT_FILE_SYSTEMS_V4='""' RESOURCE_GROUPS='"epprd_rg' '"' STABLE_STORAGE_PATH='""' IP_LABELS='"epprd:epprda:epprds"' DAEMONS='"NFS' 'RPCLOCKD"' +epprd_rg:process_resources(8.531)[1] JOB_TYPE=EXPORT_FILESYSTEMS +epprd_rg:process_resources(8.531)[1] ACTION=ACQUIRE +epprd_rg:process_resources(8.531)[1] EXPORT_FILE_SYSTEMS=/board_org,/sapmnt/EPP +epprd_rg:process_resources(8.531)[1] EXPORT_FILE_SYSTEMS_V4='' +epprd_rg:process_resources(8.531)[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources(8.531)[1] STABLE_STORAGE_PATH='' +epprd_rg:process_resources(8.531)[1] IP_LABELS=epprd:epprda:epprds +epprd_rg:process_resources(8.531)[1] DAEMONS='NFS RPCLOCKD' +epprd_rg:process_resources(8.531)[3330] RC=0 +epprd_rg:process_resources(8.531)[3331] set +a +epprd_rg:process_resources(8.531)[3333] (( 0 != 0 )) +epprd_rg:process_resources(8.531)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(8.531)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(8.531)[3343] export GROUPNAME +epprd_rg:process_resources(8.531)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(8.531)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(8.531)[3360] [[ EXPORT_FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources(8.531)[3360] [[ EXPORT_FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources(8.531)[3595] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources(8.531)[3597] export_filesystems +epprd_rg:process_resources(8.531)[export_filesystems:1621] PS4_FUNC=export_filesystems +epprd_rg:process_resources(8.531)[export_filesystems:1621] typeset PS4_FUNC +epprd_rg:process_resources(8.531)[export_filesystems:1622] [[ high == high ]] +epprd_rg:process_resources(8.531)[export_filesystems:1622] set -x +epprd_rg:process_resources(8.531)[export_filesystems:1623] STAT=0 +epprd_rg:process_resources(8.531)[export_filesystems:1624] NFSSTOPPED=0 +epprd_rg:process_resources(8.531)[export_filesystems:1629] [[ NFS == RPCLOCKD ]] +epprd_rg:process_resources(8.531)[export_filesystems:1629] [[ RPCLOCKD == RPCLOCKD ]] +epprd_rg:process_resources(8.531)[export_filesystems:1631] stopsrc -s rpc.lockd +epprd_rg:cl_sync_vgs[334] 2> /dev/null +epprd_rg:cl_sync_vgs[334] glvm_rg='' +epprd_rg:cl_sync_vgs[335] [[ -n '' ]] +epprd_rg:cl_sync_vgs[353] check_sync datavg +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:76] typeset vg_name +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:77] typeset vgid +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:78] typeset disklist +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:79] typeset lv_name 
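
cl_sync_vgs defaults to syncing four stale physical partitions at a time but honors a NUM_PARALLEL_LPS setting from /etc/environment, as the grep above shows. A sketch of that lookup; only the grep test is visible in the trace, so the sed extraction is an assumption about how the value would be read:

    typeset -i npl=4
    if grep -q '^NUM_PARALLEL_LPS=' /etc/environment; then
        npl=$(sed -n 's/^NUM_PARALLEL_LPS=//p' /etc/environment)
    fi
    syncflag=-P$npl    # later handed to syncvg, e.g. syncvg $syncflag -v datavg
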
+epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:80] typeset -li stale_count +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:81] typeset -li mode +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:82] RC=0 +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:82] typeset -li RC +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:83] typeset site_node_list +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:84] typeset site_choice +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:86] vg_name=datavg +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:87] disklist='' +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:89] getlvodm -v datavg +epprd_rg:cl_sync_vgs(0.020):datavg[check_sync:89] vgid=00c44af100004b00000001851e9dc053 +epprd_rg:cl_sync_vgs(0.020):datavg[check_sync:92] : find disks in the VG that LVM thinks are inaccessible 0513-044 The rpc.lockd Subsystem was requested to stop. +epprd_rg:cl_sync_vgs(0.020):datavg[check_sync:94] lsvg -L -p datavg +epprd_rg:process_resources(8.538)[export_filesystems:1633] touch /tmp/.RPCLOCKDSTOPPED +epprd_rg:cl_sync_vgs(0.021):datavg[check_sync:94] LC_ALL=C +epprd_rg:process_resources(8.542)[export_filesystems:1638] : For NFSv4, cl_export_fs will use STABLE_STORAGE_PATH, which is set by +epprd_rg:process_resources(8.542)[export_filesystems:1639] : clRGPA and can have colon-separated values for multiple RGs. +epprd_rg:process_resources(8.543)[export_filesystems:1640] : We will save off clRGPA values in stable_storage_path and then extract +epprd_rg:process_resources(8.543)[export_filesystems:1641] : each RG into STABLE_STORAGE_PATH for cl_unexport_fs. +epprd_rg:process_resources(8.543)[export_filesystems:1643] stable_storage_path='' +epprd_rg:process_resources(8.543)[export_filesystems:1643] typeset stable_storage_path +epprd_rg:process_resources(8.543)[export_filesystems:1645] export NFSSTOPPED +epprd_rg:process_resources(8.543)[export_filesystems:1650] export GROUPNAME +epprd_rg:process_resources(8.544)[export_filesystems:1652] get_list_head /board_org,/sapmnt/EPP +epprd_rg:process_resources(8.544)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(8.544)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(8.544)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(8.544)[get_list_head:60] set -x +epprd_rg:process_resources(8.545)[get_list_head:61] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources(8.547)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(8.547)[get_list_head:61] IFS=: +epprd_rg:process_resources(8.548)[get_list_head:62] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources(8.549)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(8.545)[export_filesystems:1652] read LIST_OF_EXPORT_FILE_SYSTEMS_FOR_RG +epprd_rg:process_resources(8.552)[export_filesystems:1653] get_list_tail /board_org,/sapmnt/EPP +epprd_rg:process_resources(8.553)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(8.553)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(8.553)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(8.553)[get_list_tail:68] set -x +epprd_rg:process_resources(8.554)[get_list_tail:69] echo /board_org,/sapmnt/EPP +epprd_rg:process_resources(8.554)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(8.554)[get_list_tail:69] IFS=: +epprd_rg:process_resources(8.555)[get_list_tail:70] echo +epprd_rg:process_resources(8.554)[export_filesystems:1653] read EXPORT_FILE_SYSTEMS +epprd_rg:process_resources(8.557)[export_filesystems:1654] get_list_head
+epprd_rg:process_resources(8.557)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(8.557)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(8.557)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(8.557)[get_list_head:60] set -x +epprd_rg:process_resources(8.559)[get_list_head:61] echo +epprd_rg:process_resources(8.558)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(8.559)[get_list_head:61] IFS=: +epprd_rg:process_resources(8.560)[get_list_head:62] echo +epprd_rg:process_resources(8.560)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(8.557)[export_filesystems:1654] read LIST_OF_EXPORT_FILE_SYSTEMS_V4_FOR_RG +epprd_rg:cl_sync_vgs(0.048):datavg[check_sync:94] disklist=$'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:process_resources(8.565)[export_filesystems:1655] get_list_tail +epprd_rg:process_resources(8.565)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(8.566)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(8.566)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(8.566)[get_list_tail:68] set -x +epprd_rg:cl_sync_vgs(0.049):datavg[check_sync:95] print -- $'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:process_resources(8.567)[get_list_tail:69] echo +epprd_rg:process_resources(8.568)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(8.568)[get_list_tail:69] IFS=: +epprd_rg:process_resources(8.568)[get_list_tail:70] echo +epprd_rg:cl_sync_vgs(0.051):datavg[check_sync:95] grep -w missing +epprd_rg:cl_sync_vgs(0.053):datavg[check_sync:95] cut -f1 '-d ' +epprd_rg:process_resources(8.570)[export_filesystems:1655] read EXPORT_FILE_SYSTEMS_V4 +epprd_rg:process_resources(8.572)[export_filesystems:1656] get_list_head +epprd_rg:process_resources(8.572)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(8.572)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(8.572)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(8.572)[get_list_head:60] set -x +epprd_rg:cl_sync_vgs(0.056):datavg[check_sync:95] missing_disklist='' +epprd_rg:process_resources(8.573)[get_list_head:61] echo +epprd_rg:process_resources(8.574)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(8.574)[get_list_head:61] IFS=: +epprd_rg:cl_sync_vgs(0.057):datavg[check_sync:96] print -- $'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:process_resources(8.575)[get_list_head:62] echo +epprd_rg:cl_sync_vgs(0.059):datavg[check_sync:96] grep -w removed +epprd_rg:process_resources(8.577)[get_list_head:62] tr , 
' ' +epprd_rg:cl_sync_vgs(0.061):datavg[check_sync:96] cut -f1 '-d ' +epprd_rg:process_resources(8.581)[export_filesystems:1656] read STABLE_STORAGE_PATH +epprd_rg:cl_sync_vgs(0.064):datavg[check_sync:96] removed_disklist='' +epprd_rg:cl_sync_vgs(0.064):datavg[check_sync:100] : Proceed if there are some disks that LVM thinks are inaccessible +epprd_rg:cl_sync_vgs(0.064):datavg[check_sync:102] [[ -n '' ]] +epprd_rg:cl_sync_vgs(0.064):datavg[check_sync:102] [[ -n '' ]] +epprd_rg:cl_sync_vgs(0.064):datavg[check_sync:196] : sync if any LVs in the VG that have stale partitions +epprd_rg:cl_sync_vgs(0.064):datavg[check_sync:198] (( 0 == 0 )) +epprd_rg:cl_sync_vgs(0.064):datavg[check_sync:201] : A status of 2,3,5 or 7 indicates the presence of dirty and/or stale partitions +epprd_rg:cl_sync_vgs(0.064):datavg[check_sync:213] is_start_logged=0 +epprd_rg:cl_sync_vgs(0.064):datavg[check_sync:218] at_least_one_sync_success=0 +epprd_rg:cl_sync_vgs(0.065):datavg[check_sync:219] lqueryvg -g 00c44af100004b00000001851e9dc053 -L +epprd_rg:process_resources(8.582)[export_filesystems:1657] get_list_tail +epprd_rg:process_resources(8.582)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(8.582)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(8.582)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(8.582)[get_list_tail:68] set -x +epprd_rg:process_resources(8.583)[get_list_tail:69] echo +epprd_rg:process_resources(8.585)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(8.585)[get_list_tail:69] IFS=: +epprd_rg:process_resources(8.585)[get_list_tail:70] echo +epprd_rg:process_resources(8.585)[export_filesystems:1657] read stable_storage_path +epprd_rg:process_resources(8.585)[export_filesystems:1659] cl_export_fs epprd:epprda:epprds '/board_org /sapmnt/EPP' '' +epprd_rg:cl_export_fs[102] version=%I% +epprd_rg:cl_export_fs[105] . /usr/es/sbin/cluster/events/utils/cl_nfs_utils +epprd_rg:cl_export_fs[98] PROGNAME=cl_export_fs +epprd_rg:cl_export_fs[99] [[ high == high ]] +epprd_rg:cl_export_fs[101] set -x +epprd_rg:cl_export_fs[102] version=%I +epprd_rg:cl_export_fs[105] cl_exports_data='' +epprd_rg:cl_export_fs[105] typeset cl_exports_data +epprd_rg:cl_export_fs[106] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[107] HOST=epprd:epprda:epprds +epprd_rg:cl_export_fs[108] EXPORT_V3='/board_org /sapmnt/EPP' +epprd_rg:cl_export_fs[109] EXPORT_V4='' +epprd_rg:cl_export_fs[111] STATUS=0 +epprd_rg:cl_export_fs[113] LIMIT=60 +epprd_rg:cl_export_fs[113] WAIT=1 +epprd_rg:cl_export_fs[113] TRY=0 +epprd_rg:cl_export_fs[113] typeset -li LIMIT WAIT TRY +epprd_rg:cl_export_fs[115] PROC_RES=false +epprd_rg:cl_export_fs[118] : If JOB_TYPE is set, and it does not equal to GROUP, then +epprd_rg:cl_export_fs[119] : we are processing for process_resources +epprd_rg:cl_export_fs[121] [[ EXPORT_FILESYSTEMS != 0 ]] +epprd_rg:cl_export_fs[121] [[ EXPORT_FILESYSTEMS != GROUP ]] +epprd_rg:cl_export_fs[122] PROC_RES=true +epprd_rg:cl_export_fs[125] set -u +epprd_rg:cl_export_fs[127] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[129] (( 3 < 2 || 3 > 3 )) +epprd_rg:cl_export_fs[142] DARE_EVENT=reconfig_resource_acquire +epprd_rg:cl_export_fs[145] : Check memory to see if NFSv4 exports have been configured.
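
Interleaved with the cl_sync_vgs work above, export_filesystems stops rpc.lockd and drops a marker file so a later step knows to restart it, and cl_export_fs then reads the PowerHA-specific exports file rather than /etc/exports. The prologue condensed, with the exact paths from the trace:

    # Stopping lock management before re-exporting; NFS clients reclaim
    # their locks when rpc.lockd is started again afterwards.
    stopsrc -s rpc.lockd
    touch /tmp/.RPCLOCKDSTOPPED                  # marker: restart it later
    EXPFILE=/usr/es/sbin/cluster/etc/exports     # PowerHA's exports source
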
+epprd_rg:cl_export_fs[147] export_v4='' +epprd_rg:cl_export_fs[148] [[ -z '' ]] +epprd_rg:cl_export_fs[148] [[ rg_move == reconfig_resource_acquire ]] +epprd_rg:cl_export_fs[158] : If we do not have NFSv4 exports configured, then determine +epprd_rg:cl_export_fs[159] : the protocol versions from the HACMP exports file. +epprd_rg:cl_export_fs[161] [[ -z '' ]] +epprd_rg:cl_export_fs[161] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[163] export_v3='' +epprd_rg:cl_export_fs[169] : Get the export file for the filesystem from the exports file. +epprd_rg:cl_export_fs[170] : Only look at the part of the line preceding comments. +epprd_rg:cl_export_fs[173] getline_exports /board_org +epprd_rg:cl_export_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_export_fs[getline_exports:45] line='' +epprd_rg:cl_export_fs[getline_exports:45] typeset line +epprd_rg:cl_export_fs[getline_exports:46] flag=0 +epprd_rg:cl_export_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_export_fs[getline_exports:47] fs=/board_org +epprd_rg:cl_export_fs[getline_exports:49] [[ -z /board_org ]] +epprd_rg:cl_export_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_export_fs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_export_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:69] (( 1 == 0 )) +epprd_rg:cl_export_fs[getline_exports:74] [[ 0 == 1 ]] +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:59] line='/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[getline_exports:60] line='/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[getline_exports:63] [[ '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' == #* ]] +epprd_rg:cl_export_fs[getline_exports:68] echo '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_sync_vgs(0.068):datavg[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.087):datavg[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.087):datavg.epprdaloglv[check_sync:221] PS4_LOOP=datavg.epprdaloglv +epprd_rg:cl_sync_vgs(0.087):datavg.epprdaloglv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.087):datavg.epprdaloglv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.087):datavg.epprdaloglv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.087):datavg.epprdaloglv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.087):datavg.epprdaloglv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.087):datavg.saplv[check_sync:221] PS4_LOOP=datavg.saplv 
+epprd_rg:cl_sync_vgs(0.087):datavg.saplv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.087):datavg.saplv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.087):datavg.saplv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.087):datavg.saplv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.087):datavg.saplv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.087):datavg.sapmntlv[check_sync:221] PS4_LOOP=datavg.sapmntlv +epprd_rg:cl_sync_vgs(0.087):datavg.sapmntlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.087):datavg.sapmntlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.087):datavg.sapmntlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.087):datavg.sapmntlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.087):datavg.sapmntlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.087):datavg.oraclelv[check_sync:221] PS4_LOOP=datavg.oraclelv +epprd_rg:cl_sync_vgs(0.087):datavg.oraclelv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.087):datavg.oraclelv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.087):datavg.oraclelv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.087):datavg.oraclelv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.087):datavg.oraclelv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.087):datavg.epplv[check_sync:221] PS4_LOOP=datavg.epplv +epprd_rg:cl_sync_vgs(0.087):datavg.epplv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.087):datavg.epplv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.087):datavg.epplv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.087):datavg.epplv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.087):datavg.epplv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.087):datavg.oraarchlv[check_sync:221] PS4_LOOP=datavg.oraarchlv +epprd_rg:cl_sync_vgs(0.087):datavg.oraarchlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.087):datavg.oraarchlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.087):datavg.oraarchlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.087):datavg.oraarchlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.087):datavg.oraarchlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.087):datavg.sapdata1lv[check_sync:221] PS4_LOOP=datavg.sapdata1lv +epprd_rg:cl_sync_vgs(0.087):datavg.sapdata1lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.087):datavg.sapdata1lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.087):datavg.sapdata1lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.087):datavg.sapdata1lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.087):datavg.sapdata1lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.087):datavg.sapdata2lv[check_sync:221] PS4_LOOP=datavg.sapdata2lv +epprd_rg:cl_sync_vgs(0.087):datavg.sapdata2lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.087):datavg.sapdata2lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.087):datavg.sapdata2lv[check_sync:227] continue 
+epprd_rg:cl_sync_vgs(0.087):datavg.sapdata2lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.087):datavg.sapdata2lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.087):datavg.sapdata3lv[check_sync:221] PS4_LOOP=datavg.sapdata3lv +epprd_rg:cl_sync_vgs(0.087):datavg.sapdata3lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.088):datavg.sapdata3lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.088):datavg.sapdata3lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.088):datavg.sapdata3lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.088):datavg.sapdata3lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.088):datavg.sapdata4lv[check_sync:221] PS4_LOOP=datavg.sapdata4lv +epprd_rg:cl_sync_vgs(0.088):datavg.sapdata4lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.088):datavg.sapdata4lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.088):datavg.sapdata4lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.088):datavg.sapdata4lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.088):datavg.sapdata4lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.088):datavg.boardlv[check_sync:221] PS4_LOOP=datavg.boardlv +epprd_rg:cl_sync_vgs(0.088):datavg.boardlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.088):datavg.boardlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.088):datavg.boardlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.088):datavg.boardlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.088):datavg.boardlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.088):datavg.origlogAlv[check_sync:221] PS4_LOOP=datavg.origlogAlv +epprd_rg:cl_export_fs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_sync_vgs(0.088):datavg.origlogAlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.088):datavg.origlogAlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.088):datavg.origlogAlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.088):datavg.origlogAlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.088):datavg.origlogAlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.088):datavg.origlogBlv[check_sync:221] PS4_LOOP=datavg.origlogBlv +epprd_rg:cl_sync_vgs(0.088):datavg.origlogBlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.088):datavg.origlogBlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.088):datavg.origlogBlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.088):datavg.origlogBlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.088):datavg.origlogBlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.088):datavg.mirrlogAlv[check_sync:221] PS4_LOOP=datavg.mirrlogAlv +epprd_rg:cl_sync_vgs(0.088):datavg.mirrlogAlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.088):datavg.mirrlogAlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.088):datavg.mirrlogAlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.088):datavg.mirrlogAlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.088):datavg.mirrlogAlv[check_sync:221] [[ high == high ]]
+epprd_rg:cl_sync_vgs(0.088):datavg.mirrlogBlv[check_sync:221] PS4_LOOP=datavg.mirrlogBlv +epprd_rg:cl_sync_vgs(0.088):datavg.mirrlogBlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.088):datavg.mirrlogBlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.088):datavg.mirrlogBlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.088):datavg.mirrlogBlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.088):datavg.mirrlogBlv[check_sync:268] [[ -n RG_MOVE ]] +epprd_rg:cl_sync_vgs(0.088):datavg.mirrlogBlv[check_sync:268] (( 0 == 1 )) +epprd_rg:cl_sync_vgs[355] exit 0 +epprd_rg:cl_export_fs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_export_fs[getline_exports:71] flag=1 +epprd_rg:cl_export_fs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_export_fs[getline_exports:76] grep -w '\\' +epprd_rg:cl_export_fs[getline_exports:76] echo '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_export_fs[getline_exports:81] cl_exports_data=' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[getline_exports:82] break +epprd_rg:cl_export_fs[getline_exports:89] return 0 +epprd_rg:cl_export_fs[174] export_line=' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[177] : The line is of the format: filesystem -option1,option2,... +epprd_rg:cl_export_fs[178] : This will give 'option1 option2 ...' +epprd_rg:cl_export_fs[180] echo /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds +epprd_rg:cl_export_fs[180] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_export_fs[180] cut -d- -f2- +epprd_rg:cl_export_fs[181] tr , ' ' +epprd_rg:cl_export_fs[180] options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap:epprd:epprda:epprds root=epprdap:epprd:epprda:epprds ' +epprd_rg:cl_export_fs[184] : Each option can be of the format name=value, or just name. +epprd_rg:cl_export_fs[185] : We only care about the vers option. +epprd_rg:cl_export_fs[189] : Have we seen the vers 'option?' +epprd_rg:cl_export_fs[191] vers_missing=1 +epprd_rg:cl_export_fs[194] : Loop through all of the export options for this export. +epprd_rg:cl_export_fs[218] : If we did not find the vers option, then NFSv3 is the default. +epprd_rg:cl_export_fs[220] (( vers_missing )) +epprd_rg:cl_export_fs[220] export_v3=' /board_org' +epprd_rg:cl_export_fs[169] : Get the export file for the filesystem from the exports file. +epprd_rg:cl_export_fs[170] : Only look at the part of the line preceding comments. 
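The check_sync loop that just finished above reads one status line per logical volume in datavg and treats status codes 2, 3, 5, and 7 as meaning stale partitions; any other code means the LV is clean and the loop continues, which is why every LV here falls through to continue and cl_sync_vgs exits 0. A minimal standalone sketch of the same idea, written against the LV STATE column of lsvg -l rather than PowerHA's internal status codes (the syncvg fallback and the hard-coded VG name are illustrative assumptions, not taken from this log):

    #!/bin/ksh
    # Sketch: report, and optionally resync, stale LVs in one volume group.
    # Assumes standard AIX lsvg/syncvg; "datavg" is simply the VG traced above.
    VG=datavg
    lsvg -l $VG | tail +3 |                 # skip the VG name and header lines
    while read lv_name type lps pps pvs lv_state mount_point
    do
        case $lv_state in
        *stale*)                            # e.g. "open/stale"
            print "LV $lv_name has stale partitions, resyncing"
            syncvg -l $lv_name              # illustrative: resync just this LV
            ;;
        esac
    done

The trace then resumes in cl_export_fs, which next pulls the exports entry for /sapmnt/EPP: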
+epprd_rg:cl_export_fs[173] getline_exports /sapmnt/EPP +epprd_rg:cl_export_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_export_fs[getline_exports:45] line='' +epprd_rg:cl_export_fs[getline_exports:45] typeset line +epprd_rg:cl_export_fs[getline_exports:46] flag=0 +epprd_rg:cl_export_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_export_fs[getline_exports:47] fs=/sapmnt/EPP +epprd_rg:cl_export_fs[getline_exports:49] [[ -z /sapmnt/EPP ]] +epprd_rg:cl_export_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_export_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:68] grep -q '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_export_fs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_export_fs[getline_exports:71] flag=1 +epprd_rg:cl_export_fs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_export_fs[getline_exports:76] grep -w '\\' +epprd_rg:cl_export_fs[getline_exports:76] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_export_fs[getline_exports:81] cl_exports_data=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:82] break +epprd_rg:cl_export_fs[getline_exports:89] return 0 +epprd_rg:cl_export_fs[174] export_line=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[177] : The line is of the format: filesystem -option1,option2,... +epprd_rg:cl_export_fs[178] : This will give 'option1 option2 ...' +epprd_rg:cl_export_fs[180] echo /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[180] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_export_fs[180] cut -d- -f2- +epprd_rg:cl_export_fs[181] tr , ' ' +epprd_rg:cl_export_fs[180] options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap root=epprdap ' +epprd_rg:cl_export_fs[184] : Each option can be of the format name=value, or just name. +epprd_rg:cl_export_fs[185] : We only care about the vers option. +epprd_rg:cl_export_fs[189] : Have we seen the vers 'option?' +epprd_rg:cl_export_fs[191] vers_missing=1 +epprd_rg:cl_export_fs[194] : Loop through all of the export options for this export. +epprd_rg:cl_export_fs[218] : If we did not find the vers option, then NFSv3 is the default. 
+epprd_rg:cl_export_fs[220] (( vers_missing )) +epprd_rg:cl_export_fs[220] export_v3=' /board_org /sapmnt/EPP' +epprd_rg:cl_export_fs[223] EXPORT_V3=' /board_org /sapmnt/EPP' +epprd_rg:cl_export_fs[224] EXPORT_V4='' +epprd_rg:cl_export_fs[227] /usr/sbin/bootinfo -K +epprd_rg:cl_export_fs[227] KERNEL_BITS=64 +epprd_rg:cl_export_fs[229] subsystems='nfsd rpc.mountd' +epprd_rg:cl_export_fs[230] [[ -n '' ]] +epprd_rg:cl_export_fs[233] : Special processing for cross mounts of EFS keys +epprd_rg:cl_export_fs[234] : The overmount of /var/efs must be removed prior +epprd_rg:cl_export_fs[235] : to stopping or restarting NFS, since the SRC +epprd_rg:cl_export_fs[236] : operations will attempt to check the EFS enablement. +epprd_rg:cl_export_fs[238] grep -w /var/efs +epprd_rg:cl_export_fs[238] mount +epprd_rg:cl_export_fs[238] mounted_info='' +epprd_rg:cl_export_fs[239] [[ -n '' ]] +epprd_rg:cl_export_fs[295] : Kill and restart everything in '"nfsd' 'rpc.mountd"' +epprd_rg:cl_export_fs[299] : Kill nfsd, and restart it below +epprd_rg:cl_export_fs[306] [[ nfsd == nfsd ]] +epprd_rg:cl_export_fs[307] [[ 64 == 64 ]] +epprd_rg:cl_export_fs[307] [[ -x /usr/sbin/nfs4smctl ]] +epprd_rg:cl_export_fs[308] [[ ! -s /etc/xtab ]] +epprd_rg:cl_export_fs[311] clcheck_server nfsd +epprd_rg:clcheck_server[118] [[ high == high ]] +epprd_rg:clcheck_server[118] version=1.10.4.2 +epprd_rg:clcheck_server[119] cl_get_path +epprd_rg:clcheck_server[119] HA_DIR=es +epprd_rg:clcheck_server[121] SERVER=nfsd +epprd_rg:clcheck_server[122] STATUS=0 +epprd_rg:clcheck_server[123] FATAL_ERROR=255 +epprd_rg:clcheck_server[124] retries=0 +epprd_rg:clcheck_server[124] typeset -li retries +epprd_rg:clcheck_server[126] [[ -n nfsd ]] +epprd_rg:clcheck_server[131] lssrc -s nfsd +epprd_rg:clcheck_server[131] LC_ALL=C +epprd_rg:clcheck_server[131] grep 'not on file' +epprd_rg:clcheck_server[131] wc -l +epprd_rg:clcheck_server[131] rc=' 0' +epprd_rg:clcheck_server[133] (( 0 == 1 )) +epprd_rg:clcheck_server[143] [[ 0 =~ 3 ]] +epprd_rg:clcheck_server[147] lssrc -s nfsd +epprd_rg:clcheck_server[147] 1> /dev/null 2> /dev/null +epprd_rg:clcheck_server[161] egrep 'stop|active' +epprd_rg:clcheck_server[161] lssrc -s nfsd +epprd_rg:clcheck_server[161] LC_ALL=C +epprd_rg:clcheck_server[161] check_if_down='' +epprd_rg:clcheck_server[166] [[ -z '' ]] +epprd_rg:clcheck_server[171] sleep 1 +epprd_rg:clcheck_server[172] egrep 'stop|active' +epprd_rg:clcheck_server[172] lssrc -s nfsd +epprd_rg:clcheck_server[172] LC_ALL=C +epprd_rg:clcheck_server[172] check_if_down='' +epprd_rg:clcheck_server[173] [[ -z '' ]] +epprd_rg:clcheck_server[177] return 0 +epprd_rg:cl_export_fs[313] startsrc -s nfsd 0513-059 The nfsd Subsystem has been started. Subsystem PID is 19464484. +epprd_rg:cl_export_fs[314] rc=0 +epprd_rg:cl_export_fs[315] (( 0 == 0 )) +epprd_rg:cl_export_fs[317] sleep 3 +epprd_rg:cl_export_fs[318] lssrc -s nfsd +epprd_rg:cl_export_fs[318] LC_ALL=C +epprd_rg:cl_export_fs[318] tail +2 +epprd_rg:cl_export_fs[318] subsys_state=' nfsd nfs 19464484 active' +epprd_rg:cl_export_fs[321] (( 0 != 0 )) +epprd_rg:cl_export_fs[321] print -- ' nfsd nfs 19464484 active' +epprd_rg:cl_export_fs[321] grep -qw active +epprd_rg:cl_export_fs[329] : nfsv4 daemon not stopped due to existing mounts +epprd_rg:cl_export_fs[330] : Turn on NFSv4 grace periods and ignore any errors. +epprd_rg:cl_export_fs[332] chnfs -I -g on -x 1 +epprd_rg:cl_export_fs[332] ODMDIR=/etc/objrepos 0513-077 Subsystem has been changed. 0513-077 Subsystem has been changed. 
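The nfsd restart above follows a simple SRC pattern: clcheck_server first makes sure the subsystem is defined and not stuck, startsrc starts it, and cl_export_fs then sleeps and polls lssrc until the subsystem reports active before enabling NFSv4 grace periods with chnfs. A compact sketch of that verify-start-poll pattern using only the standard AIX SRC commands seen in the trace (the function name and the ten-try limit are illustrative):

    #!/bin/ksh
    # Sketch: start an SRC subsystem and wait until lssrc reports it active.
    ensure_active()
    {
        typeset subsys=$1
        typeset -i try=0

        # Fail fast if the subsystem is not defined to the SRC at all.
        if LC_ALL=C lssrc -s $subsys 2>&1 | grep -q 'not on file'
        then
            print -u2 "ERROR: subsystem $subsys is not defined"
            return 1
        fi

        startsrc -s $subsys                 # reports if it is already active

        # Poll for the active state, as cl_export_fs does after startsrc.
        while (( try < 10 ))
        do
            LC_ALL=C lssrc -s $subsys | tail +2 | grep -qw active && return 0
            sleep 1
            (( try += 1 ))
        done
        return 1
    }

    ensure_active nfsd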
+epprd_rg:cl_export_fs[299] : Kill rpc.mountd, and restart it below +epprd_rg:cl_export_fs[306] [[ rpc.mountd == nfsd ]] +epprd_rg:cl_export_fs[336] : Friendly stop of rpc.mountd +epprd_rg:cl_export_fs[338] lssrc -s rpc.mountd +epprd_rg:cl_export_fs[338] LC_ALL=C +epprd_rg:cl_export_fs[338] tail +2 +epprd_rg:cl_export_fs[338] grep -qw active +epprd_rg:cl_export_fs[341] : Now, wait for rpc.mountd to die +epprd_rg:cl_export_fs[343] (( TRY=0)) +epprd_rg:cl_export_fs[343] (( 0 < 60)) +epprd_rg:cl_export_fs[345] tail +2 +epprd_rg:cl_export_fs[345] lssrc -s rpc.mountd +epprd_rg:cl_export_fs[345] LC_ALL=C +epprd_rg:cl_export_fs[345] subsys_state=' rpc.mountd nfs inoperative' +epprd_rg:cl_export_fs[346] print -- ' rpc.mountd nfs inoperative' +epprd_rg:cl_export_fs[346] grep -qw inoperative +epprd_rg:cl_export_fs[348] [[ high == high ]] +epprd_rg:cl_export_fs[348] set -x +epprd_rg:cl_export_fs[349] subsys_state=inoperative +epprd_rg:cl_export_fs[350] break +epprd_rg:cl_export_fs[356] [[ high == high ]] +epprd_rg:cl_export_fs[356] set -x +epprd_rg:cl_export_fs[358] [[ inoperative != inoperative ]] +epprd_rg:cl_export_fs[382] : If stopsrc has failed to stop rpc.mountd, +epprd_rg:cl_export_fs[383] : use a real kill on the daemon +epprd_rg:cl_export_fs[385] ps -eo comm,pid +epprd_rg:cl_export_fs[385] grep -w rpc.mountd +epprd_rg:cl_export_fs[385] grep -vw grep +epprd_rg:cl_export_fs[385] read skip subsys_pid rest +epprd_rg:cl_export_fs[386] [[ '' == +([0-9]) ]] +epprd_rg:cl_export_fs[389] : If rpc.mountd has been stopped, +epprd_rg:cl_export_fs[390] : start it back up again. +epprd_rg:cl_export_fs[392] clcheck_server rpc.mountd +epprd_rg:clcheck_server[118] [[ high == high ]] +epprd_rg:clcheck_server[118] version=1.10.4.2 +epprd_rg:clcheck_server[119] cl_get_path +epprd_rg:clcheck_server[119] HA_DIR=es +epprd_rg:clcheck_server[121] SERVER=rpc.mountd +epprd_rg:clcheck_server[122] STATUS=0 +epprd_rg:clcheck_server[123] FATAL_ERROR=255 +epprd_rg:clcheck_server[124] retries=0 +epprd_rg:clcheck_server[124] typeset -li retries +epprd_rg:clcheck_server[126] [[ -n rpc.mountd ]] +epprd_rg:clcheck_server[131] lssrc -s rpc.mountd +epprd_rg:clcheck_server[131] LC_ALL=C +epprd_rg:clcheck_server[131] grep 'not on file' +epprd_rg:clcheck_server[131] wc -l +epprd_rg:clcheck_server[131] rc=' 0' +epprd_rg:clcheck_server[133] (( 0 == 1 )) +epprd_rg:clcheck_server[143] [[ 0 =~ 3 ]] +epprd_rg:clcheck_server[147] lssrc -s rpc.mountd +epprd_rg:clcheck_server[147] 1> /dev/null 2> /dev/null +epprd_rg:clcheck_server[161] lssrc -s rpc.mountd +epprd_rg:clcheck_server[161] LC_ALL=C +epprd_rg:clcheck_server[161] egrep 'stop|active' +epprd_rg:clcheck_server[161] check_if_down='' +epprd_rg:clcheck_server[166] [[ -z '' ]] +epprd_rg:clcheck_server[171] sleep 1 +epprd_rg:clcheck_server[172] lssrc -s rpc.mountd +epprd_rg:clcheck_server[172] LC_ALL=C +epprd_rg:clcheck_server[172] egrep 'stop|active' +epprd_rg:clcheck_server[172] check_if_down='' +epprd_rg:clcheck_server[173] [[ -z '' ]] +epprd_rg:clcheck_server[177] return 0 +epprd_rg:cl_export_fs[394] [[ rpc.mountd == nfsd ]] +epprd_rg:cl_export_fs[403] : Start rpc.mountd back up again +epprd_rg:cl_export_fs[405] startsrc -s rpc.mountd 0513-059 The rpc.mountd Subsystem has been started. Subsystem PID is 22086050. 
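rpc.mountd gets the complementary treatment: a friendly stopsrc, a poll of up to 60 seconds for the inoperative state, a real kill of any surviving process as a last resort, and finally a restart through startsrc. A condensed sketch of that stop-wait-kill-restart sequence (grounded in the trace; the kill -9 signal choice is an assumption for the "real kill" branch, which this run never needed):

    #!/bin/ksh
    # Sketch: friendly stop of rpc.mountd with a kill fallback, then restart.
    subsys=rpc.mountd
    typeset -i try=0

    if LC_ALL=C lssrc -s $subsys | tail +2 | grep -qw active
    then
        stopsrc -s $subsys                  # polite SRC stop request

        # Wait up to 60 seconds for the daemon to become inoperative.
        while (( try < 60 ))
        do
            LC_ALL=C lssrc -s $subsys | tail +2 | grep -qw inoperative && break
            sleep 1
            (( try += 1 ))
        done
    fi

    # If a process is still there, kill it directly; the pipeline below is
    # the same one the trace uses to look for a surviving PID.
    ps -eo comm,pid | grep -w $subsys | grep -vw grep | read skip subsys_pid rest
    [[ $subsys_pid == +([0-9]) ]] && kill -9 $subsys_pid

    startsrc -s $subsys                     # bring it back up

The post-start verification of rpc.mountd continues below.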
+epprd_rg:cl_export_fs[406] rc=0 +epprd_rg:cl_export_fs[407] (( 0 == 0 )) +epprd_rg:cl_export_fs[409] sleep 3 +epprd_rg:cl_export_fs[410] lssrc -s rpc.mountd +epprd_rg:cl_export_fs[410] LC_ALL=C +epprd_rg:cl_export_fs[410] tail +2 +epprd_rg:cl_export_fs[410] subsys_state=' rpc.mountd nfs 22086050 active' +epprd_rg:cl_export_fs[413] (( 0 != 0 )) +epprd_rg:cl_export_fs[413] print -- ' rpc.mountd nfs 22086050 active' +epprd_rg:cl_export_fs[413] grep -qw active +epprd_rg:cl_export_fs[431] : Set the NFSv4 nfsroot parameter. This must be set prior to any +epprd_rg:cl_export_fs[432] : NFS exports that use the exname option, and cannot be set to a new +epprd_rg:cl_export_fs[433] : value if any exname exports already exist. This is normally done +epprd_rg:cl_export_fs[434] : at IPL, but rc.nfs is not run at boot when HACMP is installed. +epprd_rg:cl_export_fs[436] [[ -n '' ]] +epprd_rg:cl_export_fs[438] hasrv='' +epprd_rg:cl_export_fs[440] [[ -z '' ]] +epprd_rg:cl_export_fs[442] query=name='STABLE_STORAGE_PATH AND group=epprd_rg' +epprd_rg:cl_export_fs[443] odmget -q name='STABLE_STORAGE_PATH AND group=epprd_rg' HACMPresource +epprd_rg:cl_export_fs[444] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_export_fs[443] STABLE_STORAGE_PATH='' +epprd_rg:cl_export_fs[447] [[ -z '' ]] +epprd_rg:cl_export_fs[449] STABLE_STORAGE_PATH=/var/adm/nfsv4.hacmp/epprd_rg +epprd_rg:cl_export_fs[452] [[ -z '' ]] +epprd_rg:cl_export_fs[454] query=name='STABLE_STORAGE_COOKIE AND group=epprd_rg' +epprd_rg:cl_export_fs[455] odmget -q name='STABLE_STORAGE_COOKIE AND group=epprd_rg' HACMPresource +epprd_rg:cl_export_fs[456] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_export_fs[455] STABLE_STORAGE_COOKIE='' +epprd_rg:cl_export_fs[459] [[ -n epprd_rg ]] +epprd_rg:cl_export_fs[461] odmget -q 'name = SERVICE_LABEL and group = epprd_rg' HACMPresource +epprd_rg:cl_export_fs[462] sed -n '/value =/s/^.*"\(.*\)".*/\1/p' +epprd_rg:cl_export_fs[461] SERVICE_LABEL=epprd +epprd_rg:cl_export_fs[465] primary epprd +epprd_rg:cl_export_fs[primary:55] echo epprd +epprd_rg:cl_export_fs[465] primary=epprd +epprd_rg:cl_export_fs[466] secondary epprd +epprd_rg:cl_export_fs[secondary:74] [[ -n epprd ]] +epprd_rg:cl_export_fs[secondary:74] shift +epprd_rg:cl_export_fs[secondary:75] echo '' +epprd_rg:cl_export_fs[466] secondary='' +epprd_rg:cl_export_fs[468] nfs_node_state='' +epprd_rg:cl_export_fs[471] : Determine if grace periods are enabled +epprd_rg:cl_export_fs[473] ps -eo args +epprd_rg:cl_export_fs[473] grep -w nfsd +epprd_rg:cl_export_fs[473] grep -qw -- '-gp on' +epprd_rg:cl_export_fs[474] gp=on +epprd_rg:cl_export_fs[480] : We can use an NFSv4 node if grace periods are enabled, we are running a +epprd_rg:cl_export_fs[481] : 64-bit kernel, and the nfs4smctl command exists. +epprd_rg:cl_export_fs[483] [[ on == on ]] +epprd_rg:cl_export_fs[483] [[ 64 == 64 ]] +epprd_rg:cl_export_fs[483] [[ -x /usr/sbin/nfs4smctl ]] +epprd_rg:cl_export_fs[485] hasrv=epprd +epprd_rg:cl_export_fs[491] : If we have NFSv4 exports, then we need to configure our NFS node so that +epprd_rg:cl_export_fs[492] : we can use stable storage. Note, NFS only supports this functionality in +epprd_rg:cl_export_fs[493] : its 64-bit kernels. 
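Everything from the nfsroot comment down to hasrv=epprd above is one eligibility test: this node may act as an NFSv4 node only if nfsd is running with grace periods enabled (the '-gp on' argument), the kernel is 64-bit, and /usr/sbin/nfs4smctl exists. The same three checks in isolation, using exactly the commands from the trace:

    #!/bin/ksh
    # Sketch: the three-way NFSv4 node eligibility test from the trace.
    gp=off
    ps -eo args | grep -w nfsd | grep -qw -- '-gp on' && gp=on

    KERNEL_BITS=$(/usr/sbin/bootinfo -K)    # prints 32 or 64

    if [[ $gp == on && $KERNEL_BITS == 64 && -x /usr/sbin/nfs4smctl ]]
    then
        print "node can host NFSv4 state (grace periods on, 64-bit kernel)"
    else
        print "node cannot host NFSv4 state (gp=$gp, kernel=$KERNEL_BITS-bit)"
    fi

In this run all three conditions held, so hasrv was set to the service label epprd; with no NFSv4 exports configured, though, the stable-storage setup that follows is skipped.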
+epprd_rg:cl_export_fs[495] [[ -n '' ]] +epprd_rg:cl_export_fs[580] [[ '' == acquiring ]] +epprd_rg:cl_export_fs[585] ALLEXPORTS=All_exports +epprd_rg:cl_export_fs[587] : update resource manager with this action +epprd_rg:cl_export_fs[589] cl_RMupdate resource_acquiring All_exports cl_export_fs 2023-09-30T03:25:32.720590 2023-09-30T03:25:32.725004 +epprd_rg:cl_export_fs[592] : Build a list of all filesystems that need to be exported, irrespective of +epprd_rg:cl_export_fs[593] : the protocol version. Since some filesystems may be exported with multiple +epprd_rg:cl_export_fs[594] : versions, remove any duplicates. +epprd_rg:cl_export_fs[596] echo /board_org /sapmnt/EPP +epprd_rg:cl_export_fs[596] tr ' ' '\n' +epprd_rg:cl_export_fs[596] sort -u +epprd_rg:cl_export_fs[596] FILESYSTEM_LIST=$'/board_org\n/sapmnt/EPP' +epprd_rg:cl_export_fs[599] : Loop through all of the filesystems we need to export ... +epprd_rg:cl_export_fs[603] v3='' +epprd_rg:cl_export_fs[604] v4='' +epprd_rg:cl_export_fs[605] root=epprd:epprda:epprds +epprd_rg:cl_export_fs[606] new_options='' +epprd_rg:cl_export_fs[607] export_file_line='' +epprd_rg:cl_export_fs[608] USING_EXPORTS_FILE=0 +epprd_rg:cl_export_fs[609] export_lines='' +epprd_rg:cl_export_fs[610] otheroption='' +epprd_rg:cl_export_fs[613] : Get the export line from exportfs for this export +epprd_rg:cl_export_fs[615] exportfs +epprd_rg:cl_export_fs[615] grep '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_export_fs[615] export_line='' +epprd_rg:cl_export_fs[617] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[620] : Get the export file for the filesystem from the exports file. +epprd_rg:cl_export_fs[621] : Only look at the part of the line preceding comments. +epprd_rg:cl_export_fs[624] getline_exports /board_org +epprd_rg:cl_export_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_export_fs[getline_exports:45] line='' +epprd_rg:cl_export_fs[getline_exports:45] typeset line +epprd_rg:cl_export_fs[getline_exports:46] flag=0 +epprd_rg:cl_export_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_export_fs[getline_exports:47] fs=/board_org +epprd_rg:cl_export_fs[getline_exports:49] [[ -z /board_org ]] +epprd_rg:cl_export_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_export_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_export_fs[getline_exports:69] (( 1 == 0 )) +epprd_rg:cl_export_fs[getline_exports:74] [[ 0 == 1 ]] +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:59] line='/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[getline_exports:60] line='/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[getline_exports:63] [[ '/board_org 
-sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' == #* ]] +epprd_rg:cl_export_fs[getline_exports:68] echo '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_export_fs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_export_fs[getline_exports:71] flag=1 +epprd_rg:cl_export_fs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_export_fs[getline_exports:76] grep -w '\\' +epprd_rg:cl_export_fs[getline_exports:76] echo '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_export_fs[getline_exports:81] cl_exports_data=' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[getline_exports:82] break +epprd_rg:cl_export_fs[getline_exports:89] return 0 +epprd_rg:cl_export_fs[625] export_file_line=' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[628] : If the administrator provides an entry for the filesystem in the +epprd_rg:cl_export_fs[629] : exports file then ignore the root option that was passed in on the +epprd_rg:cl_export_fs[630] : command line. +epprd_rg:cl_export_fs[632] [[ -n ' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' ]] +epprd_rg:cl_export_fs[632] root='' +epprd_rg:cl_export_fs[636] : If the filesystem currently is not exported, then get the options from +epprd_rg:cl_export_fs[637] : the exports file. We will merge these options with options specified +epprd_rg:cl_export_fs[638] : through resource group attributes to produce the actual options we will +epprd_rg:cl_export_fs[639] : provide to exportfs. +epprd_rg:cl_export_fs[641] [[ -z '' ]] +epprd_rg:cl_export_fs[643] export_line=' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_export_fs[644] USING_EXPORTS_FILE=1 +epprd_rg:cl_export_fs[648] : In case of multiple exports for same filesystem +epprd_rg:cl_export_fs[649] : Process them line by line +epprd_rg:cl_export_fs[651] set +u +epprd_rg:cl_export_fs[652] oldifs=$' \t\n' +epprd_rg:cl_export_fs[653] IFS=$'\n' +epprd_rg:cl_export_fs[653] export_lines=( ' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' ) +epprd_rg:cl_export_fs[654] IFS=$' \t\n' +epprd_rg:cl_export_fs[656] [ -n ' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' ] +epprd_rg:cl_export_fs[661] : The line is of the format: filesystem -option1,option2,... +epprd_rg:cl_export_fs[662] : This will give 'option1 option2 ...' +epprd_rg:cl_export_fs[664] echo /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds +epprd_rg:cl_export_fs[664] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_export_fs[664] cut -d- -f2- +epprd_rg:cl_export_fs[665] tr , ' ' +epprd_rg:cl_export_fs[664] old_options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap:epprd:epprda:epprds root=epprdap:epprd:epprda:epprds ' +epprd_rg:cl_export_fs[668] : Each option can be of the format name=value, or just name. 
+epprd_rg:cl_export_fs[669] : We care about the hasrv, vers, and root options. +epprd_rg:cl_export_fs[673] : Loop through all of the export options for this export. +epprd_rg:cl_export_fs[700] [[ sec=sys:krb5p:krb5i:krb5:dh == -* ]] +epprd_rg:cl_export_fs[708] [[ sec=sys:krb5p:krb5i:krb5:dh == /board_org ]] +epprd_rg:cl_export_fs[716] echo sec=sys:krb5p:krb5i:krb5:dh +epprd_rg:cl_export_fs[716] cut -d= -f 1 +epprd_rg:cl_export_fs[716] otheroption=sec +epprd_rg:cl_export_fs[717] [[ '' == *sec* ]] +epprd_rg:cl_export_fs[724] : Merge in all remaining options. +epprd_rg:cl_export_fs[726] new_options=,sec=sys:krb5p:krb5i:krb5:dh +epprd_rg:cl_export_fs[700] [[ rw == -* ]] +epprd_rg:cl_export_fs[708] [[ rw == /board_org ]] +epprd_rg:cl_export_fs[716] echo rw +epprd_rg:cl_export_fs[716] cut -d= -f 1 +epprd_rg:cl_export_fs[716] otheroption=rw +epprd_rg:cl_export_fs[717] [[ ,sec=sys:krb5p:krb5i:krb5:dh == *rw* ]] +epprd_rg:cl_export_fs[724] : Merge in all remaining options. +epprd_rg:cl_export_fs[726] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw +epprd_rg:cl_export_fs[700] [[ access=epprdap:epprd:epprda:epprds == -* ]] +epprd_rg:cl_export_fs[708] [[ access=epprdap:epprd:epprda:epprds == /board_org ]] +epprd_rg:cl_export_fs[716] echo access=epprdap:epprd:epprda:epprds +epprd_rg:cl_export_fs[716] cut -d= -f 1 +epprd_rg:cl_export_fs[716] otheroption=access +epprd_rg:cl_export_fs[717] [[ ,sec=sys:krb5p:krb5i:krb5:dh,rw == *access* ]] +epprd_rg:cl_export_fs[724] : Merge in all remaining options. +epprd_rg:cl_export_fs[726] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds +epprd_rg:cl_export_fs[695] : Merge in the root option. +epprd_rg:cl_export_fs[697] echo root=epprdap:epprd:epprda:epprds +epprd_rg:cl_export_fs[697] cut -d= -f2- +epprd_rg:cl_export_fs[697] root=epprdap:epprd:epprda:epprds +epprd_rg:cl_export_fs[733] set -u +epprd_rg:cl_export_fs[736] : At this point, v3 and v4 are set based on what is actually exported +epprd_rg:cl_export_fs[737] : or what is configured to be exported in the exports file. +epprd_rg:cl_export_fs[740] (( USING_EXPORTS_FILE )) +epprd_rg:cl_export_fs[742] v3='' +epprd_rg:cl_export_fs[743] v4='' +epprd_rg:cl_export_fs[747] : At this point, v3 and v4 are set based on what is actually exported. +epprd_rg:cl_export_fs[748] : Now add additional versions if the resource group has them configured. +epprd_rg:cl_export_fs[752] [[ /board_org == /board_org ]] +epprd_rg:cl_export_fs[752] v3=:2:3 +epprd_rg:cl_export_fs[752] break +epprd_rg:cl_export_fs[761] : Versions 2 and 3 are the default versions. Some versions of AIX do +epprd_rg:cl_export_fs[762] : not support the vers export option, so only use the option if we are +epprd_rg:cl_export_fs[763] : exporting a non-default value such as 4 +epprd_rg:cl_export_fs[765] [[ -n '' ]] +epprd_rg:cl_export_fs[779] [[ -n epprdap:epprd:epprda:epprds ]] +epprd_rg:cl_export_fs[782] : If we have root privileged clients, +epprd_rg:cl_export_fs[783] : then add them to the option list.
+epprd_rg:cl_export_fs[785] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds +epprd_rg:cl_export_fs[788] [[ -n '' ]] +epprd_rg:cl_export_fs[798] : Strip off the leading comma +epprd_rg:cl_export_fs[800] echo ,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds +epprd_rg:cl_export_fs[800] cut -d, -f2- +epprd_rg:cl_export_fs[800] new_options=sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds +epprd_rg:cl_export_fs[802] [[ -z sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds ]] +epprd_rg:cl_export_fs[811] : Exporting filesystem /board_org with options sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds +epprd_rg:cl_export_fs[813] exportfs -i -o sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds /board_org +epprd_rg:cl_export_fs[814] RC=0 +epprd_rg:cl_export_fs[817] (( 0 != 0 )) +epprd_rg:cl_export_fs[603] v3='' +epprd_rg:cl_export_fs[604] v4='' +epprd_rg:cl_export_fs[605] root=epprd:epprda:epprds +epprd_rg:cl_export_fs[606] new_options='' +epprd_rg:cl_export_fs[607] export_file_line='' +epprd_rg:cl_export_fs[608] USING_EXPORTS_FILE=0 +epprd_rg:cl_export_fs[609] export_lines[0]='' +epprd_rg:cl_export_fs[610] otheroption='' +epprd_rg:cl_export_fs[613] : Get the export line from exportfs for this export +epprd_rg:cl_export_fs[615] exportfs +epprd_rg:cl_export_fs[615] grep '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_export_fs[615] export_line='' +epprd_rg:cl_export_fs[617] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[620] : Get the export file for the filesystem from the exports file. +epprd_rg:cl_export_fs[621] : Only look at the part of the line preceding comments. 
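Both filesystems go through the same parsing idiom seen above and about to repeat for /sapmnt/EPP: locate the exports-file line whose first field is the filesystem, drop that first field with awk, strip the leading dash with cut -d- -f2-, and turn the comma-separated options into a word list with tr so each one can be classified. A self-contained sketch of that parser; the function name and the head -1 de-duplication are illustrative, and absent a vers= option the sketch concludes v2/v3, exactly as the vers_missing logic does:

    #!/bin/ksh
    # Sketch: pull one filesystem's option list out of an exports-style file
    # and decide which NFS versions it implies (no vers= means v2/v3).
    EXPFILE=/usr/es/sbin/cluster/etc/exports

    get_export_options()
    {
        typeset fs=$1 line options opt vers=''

        # First non-comment line whose first field is exactly $fs.
        line=$(grep "^[[:space:]]*$fs[[:space:]]" $EXPFILE | head -1)
        [[ -z $line ]] && return 1

        # '/fs -opt1,opt2=v,...'  ->  'opt1 opt2=v ...'
        options=$(echo "$line" |
                  awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' |
                  cut -d- -f2- | tr , ' ')

        for opt in $options
        do
            [[ $opt == vers=* ]] && vers=${opt#vers=}
        done

        print "$fs: options=[$options] versions=${vers:-2:3}"
    }

    get_export_options /board_org

The second pass, for /sapmnt/EPP, follows immediately: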
+epprd_rg:cl_export_fs[624] getline_exports /sapmnt/EPP +epprd_rg:cl_export_fs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_export_fs[getline_exports:45] line='' +epprd_rg:cl_export_fs[getline_exports:45] typeset line +epprd_rg:cl_export_fs[getline_exports:46] flag=0 +epprd_rg:cl_export_fs[getline_exports:46] typeset -i flag +epprd_rg:cl_export_fs[getline_exports:47] fs=/sapmnt/EPP +epprd_rg:cl_export_fs[getline_exports:49] [[ -z /sapmnt/EPP ]] +epprd_rg:cl_export_fs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_export_fs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_export_fs[getline_exports:56] read -r line +epprd_rg:cl_export_fs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_export_fs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:68] grep -q '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_export_fs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_export_fs[getline_exports:71] flag=1 +epprd_rg:cl_export_fs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_export_fs[getline_exports:76] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:76] grep -w '\\' +epprd_rg:cl_export_fs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_export_fs[getline_exports:81] cl_exports_data=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[getline_exports:82] break +epprd_rg:cl_export_fs[getline_exports:89] return 0 +epprd_rg:cl_export_fs[625] export_file_line=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[628] : If the administrator provides an entry for the filesystem in the +epprd_rg:cl_export_fs[629] : exports file then ignore the root option that was passed in on the +epprd_rg:cl_export_fs[630] : command line. +epprd_rg:cl_export_fs[632] [[ -n ' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' ]] +epprd_rg:cl_export_fs[632] root='' +epprd_rg:cl_export_fs[636] : If the filesystem currently is not exported, then get the options from +epprd_rg:cl_export_fs[637] : the exports file. We will merge these options with options specified +epprd_rg:cl_export_fs[638] : through resource group attributes to produce the actual options we will +epprd_rg:cl_export_fs[639] : provide to exportfs. 
+epprd_rg:cl_export_fs[641] [[ -z '' ]] +epprd_rg:cl_export_fs[643] export_line=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_export_fs[644] USING_EXPORTS_FILE=1 +epprd_rg:cl_export_fs[648] : In case of multiple exports for same filesystem +epprd_rg:cl_export_fs[649] : Process them line by line +epprd_rg:cl_export_fs[651] set +u +epprd_rg:cl_export_fs[652] oldifs=$' \t\n' +epprd_rg:cl_export_fs[653] IFS=$'\n' +epprd_rg:cl_export_fs[653] export_lines=( ' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' ) +epprd_rg:cl_export_fs[654] IFS=$' \t\n' +epprd_rg:cl_export_fs[656] [ -n ' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' ] +epprd_rg:cl_export_fs[661] : The line is of the format: filesystem -option1,option2,... +epprd_rg:cl_export_fs[662] : This will give 'option1 option2 ...' +epprd_rg:cl_export_fs[664] echo /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[664] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_export_fs[664] cut -d- -f2- +epprd_rg:cl_export_fs[665] tr , ' ' +epprd_rg:cl_export_fs[664] old_options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap root=epprdap ' +epprd_rg:cl_export_fs[668] : Each option can be of the format name=value, or just name. +epprd_rg:cl_export_fs[669] : We care about the hasrv, vers, and root options. +epprd_rg:cl_export_fs[673] : Loop through all of the export options for this export. +epprd_rg:cl_export_fs[700] [[ sec=sys:krb5p:krb5i:krb5:dh == -* ]] +epprd_rg:cl_export_fs[708] [[ sec=sys:krb5p:krb5i:krb5:dh == /sapmnt/EPP ]] +epprd_rg:cl_export_fs[716] echo sec=sys:krb5p:krb5i:krb5:dh +epprd_rg:cl_export_fs[716] cut -d= -f 1 +epprd_rg:cl_export_fs[716] otheroption=sec +epprd_rg:cl_export_fs[717] [[ '' == *sec* ]] +epprd_rg:cl_export_fs[724] : Merge in all remaining options. +epprd_rg:cl_export_fs[726] new_options=,sec=sys:krb5p:krb5i:krb5:dh +epprd_rg:cl_export_fs[700] [[ rw == -* ]] +epprd_rg:cl_export_fs[708] [[ rw == /sapmnt/EPP ]] +epprd_rg:cl_export_fs[716] echo rw +epprd_rg:cl_export_fs[716] cut -d= -f 1 +epprd_rg:cl_export_fs[716] otheroption=rw +epprd_rg:cl_export_fs[717] [[ ,sec=sys:krb5p:krb5i:krb5:dh == *rw* ]] +epprd_rg:cl_export_fs[724] : Merge in all remaining options. +epprd_rg:cl_export_fs[726] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw +epprd_rg:cl_export_fs[700] [[ access=epprdap == -* ]] +epprd_rg:cl_export_fs[708] [[ access=epprdap == /sapmnt/EPP ]] +epprd_rg:cl_export_fs[716] cut -d= -f 1 +epprd_rg:cl_export_fs[716] echo access=epprdap +epprd_rg:cl_export_fs[716] otheroption=access +epprd_rg:cl_export_fs[717] [[ ,sec=sys:krb5p:krb5i:krb5:dh,rw == *access* ]] +epprd_rg:cl_export_fs[724] : Merge in all remaining options. +epprd_rg:cl_export_fs[726] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap +epprd_rg:cl_export_fs[695] : Merge in the root option. +epprd_rg:cl_export_fs[697] echo root=epprdap +epprd_rg:cl_export_fs[697] cut -d= -f2- +epprd_rg:cl_export_fs[697] root=epprdap +epprd_rg:cl_export_fs[733] set -u +epprd_rg:cl_export_fs[736] : At this point, v3 and v4 are set based on what is actually exported +epprd_rg:cl_export_fs[737] : or what is configured to be exported in the exports file. +epprd_rg:cl_export_fs[740] (( USING_EXPORTS_FILE )) +epprd_rg:cl_export_fs[742] v3='' +epprd_rg:cl_export_fs[743] v4='' +epprd_rg:cl_export_fs[747] : At this point, v3 and v4 are set based on what is actually exported. 
+epprd_rg:cl_export_fs[748] : Now add additional versions if the resource group has them configured. +epprd_rg:cl_export_fs[752] [[ /sapmnt/EPP == /board_org ]] +epprd_rg:cl_export_fs[752] [[ /sapmnt/EPP == /sapmnt/EPP ]] +epprd_rg:cl_export_fs[752] v3=:2:3 +epprd_rg:cl_export_fs[752] break +epprd_rg:cl_export_fs[761] : Versions 2 and 3 are the default versions. Some versions of AIX do +epprd_rg:cl_export_fs[762] : not support the vers export option, so only use the option if we are +epprd_rg:cl_export_fs[763] : exporting a non-default value such as 4 +epprd_rg:cl_export_fs[765] [[ -n '' ]] +epprd_rg:cl_export_fs[779] [[ -n epprdap ]] +epprd_rg:cl_export_fs[782] : If we have root privileged clients, +epprd_rg:cl_export_fs[783] : then add them to the option list. +epprd_rg:cl_export_fs[785] new_options=,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[788] [[ -n '' ]] +epprd_rg:cl_export_fs[798] : Strip off the leading comma +epprd_rg:cl_export_fs[800] echo ,sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[800] cut -d, -f2- +epprd_rg:cl_export_fs[800] new_options=sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[802] [[ -z sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap ]] +epprd_rg:cl_export_fs[811] : Exporting filesystem /sapmnt/EPP with options sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_export_fs[813] exportfs -i -o sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap /sapmnt/EPP +epprd_rg:cl_export_fs[814] RC=0 +epprd_rg:cl_export_fs[817] (( 0 != 0 )) +epprd_rg:cl_export_fs[834] ALLNOERREXPORT=All_nonerror_exports +epprd_rg:cl_export_fs[836] : update resource manager with results +epprd_rg:cl_export_fs[838] cl_RMupdate resource_up All_nonerror_exports cl_export_fs 2023-09-30T03:25:32.886140 2023-09-30T03:25:32.890466 +epprd_rg:cl_export_fs[840] exit 0 +epprd_rg:process_resources(18.067)[export_filesystems:1662] RC=0 +epprd_rg:process_resources(18.067)[export_filesystems:1663] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources(18.067)[export_filesystems:1669] (( 0 != 0 )) +epprd_rg:process_resources(18.067)[export_filesystems:1675] return 0 +epprd_rg:process_resources(18.067)[3324] true +epprd_rg:process_resources(18.067)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(18.067)[3328] set -a +epprd_rg:process_resources(18.067)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-09-30T03:25:32.903722 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(18.080)[3329] eval JOB_TYPE=TELINIT +epprd_rg:process_resources(18.080)[1] JOB_TYPE=TELINIT +epprd_rg:process_resources(18.080)[3330] RC=0 +epprd_rg:process_resources(18.080)[3331] set +a +epprd_rg:process_resources(18.080)[3333] (( 0 != 0 )) +epprd_rg:process_resources(18.080)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(18.080)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(18.080)[3343] export GROUPNAME +epprd_rg:process_resources(18.080)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(18.080)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(18.080)[3360] [[ TELINIT == RELEASE ]] +epprd_rg:process_resources(18.080)[3360] [[ TELINIT == ONLINE ]] +epprd_rg:process_resources(18.080)[3435] cl_telinit +epprd_rg:cl_telinit[178] version=%I%
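Stripped of the bookkeeping, the export step that just completed for both filesystems is a single command each: exportfs -i -o with the merged option string, where -i tells exportfs to ignore /etc/exports and use only the options computed above. A minimal sketch of that final call with the /sapmnt/EPP values from the trace (the error message is illustrative; in the trace a non-zero RC would instead be reported through the cluster's resource manager):

    #!/bin/ksh
    # Sketch: export one filesystem with explicitly supplied options.
    fs=/sapmnt/EPP
    new_options='sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap'

    exportfs -i -o $new_options $fs         # -i: ignore /etc/exports entries
    RC=$?
    if (( RC != 0 ))
    then
        print -u2 "ERROR: exportfs of $fs failed, rc=$RC"
        exit 1
    fi

With the exports done, clRGPA hands back a TELINIT job, and cl_telinit (continuing below) determines that telinit processing is disabled and simply returns.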
+epprd_rg:cl_telinit[182] TELINIT_FILE=/usr/es/sbin/cluster/.telinit +epprd_rg:cl_telinit[183] USE_TELINIT_FILE=/usr/es/sbin/cluster/.use_telinit +epprd_rg:cl_telinit[185] [[ -f /usr/es/sbin/cluster/.use_telinit ]] +epprd_rg:cl_telinit[189] USE_TELINIT=0 +epprd_rg:cl_telinit[198] [[ '' == -boot ]] +epprd_rg:cl_telinit[236] cl_lsitab clinit +epprd_rg:cl_telinit[236] 1> /dev/null 2>& 1 +epprd_rg:cl_telinit[239] : telinit a disabled +epprd_rg:cl_telinit[241] return 0 +epprd_rg:process_resources(18.101)[3324] true +epprd_rg:process_resources(18.101)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(18.101)[3328] set -a +epprd_rg:process_resources(18.101)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-09-30T03:25:32.938066 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(18.114)[3329] eval JOB_TYPE=MOUNT_FILESYSTEMS ACTION=ACQUIRE FILE_SYSTEMS='"/board;/board_org"' RESOURCE_GROUPS='"epprd_rg' '"' NFS_NETWORKS='""' NFS_HOSTS='""' IP_LABELS='"epprd"' +epprd_rg:process_resources(18.114)[1] JOB_TYPE=MOUNT_FILESYSTEMS +epprd_rg:process_resources(18.114)[1] ACTION=ACQUIRE +epprd_rg:process_resources(18.114)[1] FILE_SYSTEMS='/board;/board_org' +epprd_rg:process_resources(18.114)[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources(18.114)[1] NFS_NETWORKS='' +epprd_rg:process_resources(18.114)[1] NFS_HOSTS='' +epprd_rg:process_resources(18.114)[1] IP_LABELS=epprd +epprd_rg:process_resources(18.114)[3330] RC=0 +epprd_rg:process_resources(18.114)[3331] set +a +epprd_rg:process_resources(18.114)[3333] (( 0 != 0 )) +epprd_rg:process_resources(18.114)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(18.115)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(18.115)[3343] export GROUPNAME +epprd_rg:process_resources(18.115)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(18.115)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(18.115)[3360] [[ MOUNT_FILESYSTEMS == RELEASE ]] +epprd_rg:process_resources(18.115)[3360] [[ MOUNT_FILESYSTEMS == ONLINE ]] +epprd_rg:process_resources(18.115)[3612] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources(18.115)[3614] mount_nfs_filesystems MOUNT +epprd_rg:process_resources(18.115)[mount_nfs_filesystems:1437] PS4_FUNC=mount_nfs_filesystems +epprd_rg:process_resources(18.115)[mount_nfs_filesystems:1437] typeset PS4_FUNC +epprd_rg:process_resources(18.115)[mount_nfs_filesystems:1438] [[ high == high ]] +epprd_rg:process_resources(18.115)[mount_nfs_filesystems:1438] set -x +epprd_rg:process_resources(18.115)[mount_nfs_filesystems:1440] post_event_member=FALSE +epprd_rg:process_resources(18.115)[mount_nfs_filesystems:1444] [[ epprda == epprda ]] +epprd_rg:process_resources(18.115)[mount_nfs_filesystems:1446] post_event_member=TRUE +epprd_rg:process_resources(18.115)[mount_nfs_filesystems:1447] break +epprd_rg:process_resources(18.115)[mount_nfs_filesystems:1452] : This node will not be in the resource group so do not mount filesystems. 
+epprd_rg:process_resources(18.115)[mount_nfs_filesystems:1454] [[ TRUE == FALSE ]] +epprd_rg:process_resources(18.115)[mount_nfs_filesystems:1459] STAT=0 +epprd_rg:process_resources(18.115)[mount_nfs_filesystems:1463] export GROUPNAME +epprd_rg:process_resources(18.116)[mount_nfs_filesystems:1465] get_list_head '/board;/board_org' +epprd_rg:process_resources(18.116)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(18.116)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(18.116)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(18.116)[get_list_head:60] set -x +epprd_rg:process_resources(18.117)[get_list_head:61] echo '/board;/board_org' +epprd_rg:process_resources(18.119)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(18.119)[get_list_head:61] IFS=: +epprd_rg:process_resources(18.120)[get_list_head:62] echo '/board;/board_org' +epprd_rg:process_resources(18.121)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(18.119)[mount_nfs_filesystems:1465] read LIST_OF_FILE_SYSTEMS_FOR_RG +epprd_rg:process_resources(18.127)[mount_nfs_filesystems:1466] get_list_tail '/board;/board_org' +epprd_rg:process_resources(18.127)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(18.127)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(18.127)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(18.127)[get_list_tail:68] set -x +epprd_rg:process_resources(18.128)[get_list_tail:69] echo '/board;/board_org' +epprd_rg:process_resources(18.130)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(18.130)[get_list_tail:69] IFS=: +epprd_rg:process_resources(18.130)[get_list_tail:70] echo +epprd_rg:process_resources(18.129)[mount_nfs_filesystems:1466] read FILE_SYSTEMS +epprd_rg:process_resources(18.132)[mount_nfs_filesystems:1468] get_list_head +epprd_rg:process_resources(18.132)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(18.132)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(18.132)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(18.132)[get_list_head:60] set -x +epprd_rg:process_resources(18.133)[get_list_head:61] echo +epprd_rg:process_resources(18.135)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(18.135)[get_list_head:61] IFS=: +epprd_rg:process_resources(18.136)[get_list_head:62] echo +epprd_rg:process_resources(18.138)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(18.135)[mount_nfs_filesystems:1468] read NFS_HOST +epprd_rg:process_resources(18.141)[mount_nfs_filesystems:1469] get_list_tail +epprd_rg:process_resources(18.141)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(18.141)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(18.141)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(18.141)[get_list_tail:68] set -x +epprd_rg:process_resources(18.142)[get_list_tail:69] echo +epprd_rg:process_resources(18.146)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(18.146)[get_list_tail:69] IFS=: +epprd_rg:process_resources(18.146)[get_list_tail:70] echo +epprd_rg:process_resources(18.145)[mount_nfs_filesystems:1469] read NFS_HOSTS +epprd_rg:process_resources(18.149)[mount_nfs_filesystems:1471] get_list_head +epprd_rg:process_resources(18.149)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(18.149)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(18.149)[get_list_head:60] [[ high == high 
]] +epprd_rg:process_resources(18.149)[get_list_head:60] set -x +epprd_rg:process_resources(18.151)[get_list_head:61] echo +epprd_rg:process_resources(18.152)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(18.152)[get_list_head:61] IFS=: +epprd_rg:process_resources(18.153)[get_list_head:62] echo +epprd_rg:process_resources(18.154)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(18.150)[mount_nfs_filesystems:1471] read NFS_NETWORK +epprd_rg:process_resources(18.159)[mount_nfs_filesystems:1472] get_list_tail +epprd_rg:process_resources(18.160)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(18.160)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(18.160)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(18.160)[get_list_tail:68] set -x +epprd_rg:process_resources(18.161)[get_list_tail:69] echo +epprd_rg:process_resources(18.162)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(18.162)[get_list_tail:69] IFS=: +epprd_rg:process_resources(18.163)[get_list_tail:70] echo +epprd_rg:process_resources(18.160)[mount_nfs_filesystems:1472] read NFS_NETWORKS +epprd_rg:process_resources(18.165)[mount_nfs_filesystems:1474] get_list_head epprd +epprd_rg:process_resources(18.165)[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources(18.165)[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources(18.165)[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources(18.165)[get_list_head:60] set -x +epprd_rg:process_resources(18.166)[get_list_head:61] echo epprd +epprd_rg:process_resources(18.168)[get_list_head:61] read listhead listtail +epprd_rg:process_resources(18.168)[get_list_head:61] IFS=: +epprd_rg:process_resources(18.170)[get_list_head:62] tr , ' ' +epprd_rg:process_resources(18.169)[get_list_head:62] echo epprd +epprd_rg:process_resources(18.168)[mount_nfs_filesystems:1474] read LIST_OF_IP_LABELS_FOR_RG +epprd_rg:process_resources(18.174)[mount_nfs_filesystems:1475] get_list_tail epprd +epprd_rg:process_resources(18.174)[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources(18.174)[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources(18.174)[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources(18.174)[get_list_tail:68] set -x +epprd_rg:process_resources(18.175)[get_list_tail:69] echo epprd +epprd_rg:process_resources(18.179)[get_list_tail:69] read listhead listtail +epprd_rg:process_resources(18.179)[get_list_tail:69] IFS=: +epprd_rg:process_resources(18.179)[get_list_tail:70] echo +epprd_rg:process_resources(18.178)[mount_nfs_filesystems:1475] read IP_LABELS +epprd_rg:process_resources(18.179)[mount_nfs_filesystems:1477] MOUNT_FILESYSTEM='/board;/board_org' +epprd_rg:process_resources(18.179)[mount_nfs_filesystems:1478] NFSMOUNT_LABEL=epprd +epprd_rg:process_resources(18.179)[mount_nfs_filesystems:1481] : Do the required NFS_mounts. 
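The get_list_head/get_list_tail pairs above are how process_resources de-queues the per-resource-group lists clRGPA returns: groups are separated by colons, items within a group by commas, so head yields the first group's items as a space-separated word list and tail returns the remainder for the next group. A standalone sketch of the pair; the two-group example list is invented for illustration (this run has a single group, so every tail is empty):

    #!/bin/ksh
    # Sketch: split colon-separated per-RG lists the way the trace does.
    get_list_head()
    {
        echo "$*" | IFS=: read listhead listtail
        echo "$listhead" | tr , ' '         # commas separate items in a group
    }

    get_list_tail()
    {
        echo "$*" | IFS=: read listhead listtail
        echo "$listtail"
    }

    LIST='/board;/board_org,/sapmnt/EPP:/other_fs'      # hypothetical
    get_list_head "$LIST" | read first_group
    get_list_tail "$LIST" | read rest
    print "first group: $first_group"       # /board;/board_org /sapmnt/EPP
    print "remaining:   $rest"              # /other_fs

Below, the lists resolved for epprd_rg feed the actual cross-mount work: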
+epprd_rg:process_resources(18.179)[mount_nfs_filesystems:1484] NW_NFSMOUNT_LABEL='' +epprd_rg:process_resources(18.179)[mount_nfs_filesystems:1486] [[ -z '' ]] +epprd_rg:process_resources(18.179)[mount_nfs_filesystems:1488] NFS_HOST=epprda +epprd_rg:process_resources(18.179)[mount_nfs_filesystems:1491] NFSHOST='' +epprd_rg:process_resources(18.180)[mount_nfs_filesystems:1492] [[ -n epprda ]] +epprd_rg:process_resources(18.180)[mount_nfs_filesystems:1494] [[ -n '' ]] +epprd_rg:process_resources(18.180)[mount_nfs_filesystems:1516] [[ MOUNT == REMOUNT ]] +epprd_rg:process_resources(18.180)[mount_nfs_filesystems:1526] ping epprd 1024 1 +epprd_rg:process_resources(18.180)[mount_nfs_filesystems:1526] 1> /dev/null +epprd_rg:process_resources(18.185)[mount_nfs_filesystems:1528] NFSHOST=epprd +epprd_rg:process_resources(18.185)[mount_nfs_filesystems:1529] break +epprd_rg:process_resources(18.185)[mount_nfs_filesystems:1533] [[ -n epprd ]] +epprd_rg:process_resources(18.185)[mount_nfs_filesystems:1536] : activate_nfs will not wait for the mounts to complete +epprd_rg:process_resources(18.185)[mount_nfs_filesystems:1538] cl_activate_nfs 1 epprd '/board;/board_org' +epprd_rg:cl_activate_nfs[68] [[ high == high ]] +epprd_rg:cl_activate_nfs[68] version='1.19.4.2 $Source$' +epprd_rg:cl_activate_nfs[70] . /usr/es/sbin/cluster/events/utils/cl_nfs_utils +epprd_rg:cl_activate_nfs[98] PROGNAME=cl_activate_nfs +epprd_rg:cl_activate_nfs[99] [[ high == high ]] +epprd_rg:cl_activate_nfs[101] set -x +epprd_rg:cl_activate_nfs[102] version=%I +epprd_rg:cl_activate_nfs[105] cl_exports_data='' +epprd_rg:cl_activate_nfs[105] typeset cl_exports_data +epprd_rg:cl_activate_nfs[106] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[72] set -u +epprd_rg:cl_activate_nfs[242] grep -w ^MOUNT_WLMCNTRL_SELFMANAGE /etc/environment +epprd_rg:cl_activate_nfs[242] export eval +epprd_rg:cl_activate_nfs[244] (( 3 < 3 )) +epprd_rg:cl_activate_nfs[253] ATTEMPTS=1 +epprd_rg:cl_activate_nfs[253] typeset -li ATTEMPTS +epprd_rg:cl_activate_nfs[254] HOST=epprd +epprd_rg:cl_activate_nfs[256] shift 2 +epprd_rg:cl_activate_nfs[261] FILELIST='/board;/board_org' +epprd_rg:cl_activate_nfs[266] print '/board;/board_org' +epprd_rg:cl_activate_nfs[266] grep -q '\;/' +epprd_rg:cl_activate_nfs[271] CROSSMOUNTS=TRUE +epprd_rg:cl_activate_nfs[272] print '/board;/board_org' +epprd_rg:cl_activate_nfs[272] tr ' ' '\n' +epprd_rg:cl_activate_nfs[272] /bin/sort -k 1,1 '-t;' +epprd_rg:cl_activate_nfs[272] MOUNTLIST='/board;/board_org' +epprd_rg:cl_activate_nfs[281] ALLNFS=All_nfs_mounts +epprd_rg:cl_activate_nfs[282] cl_RMupdate resource_acquiring All_nfs_mounts cl_activate_nfs 2023-09-30T03:25:33.061615 2023-09-30T03:25:33.065934 +epprd_rg:cl_activate_nfs[288] odmget -q name='RECOVERY_METHOD AND group=epprd_rg' HACMPresource +epprd_rg:cl_activate_nfs[289] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_activate_nfs[288] METHOD=sequential +epprd_rg:cl_activate_nfs[291] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_activate_nfs[291] odmget -q name='EXPORT_FILESYSTEM AND group=epprd_rg' HACMPresource +epprd_rg:cl_activate_nfs[291] EXPORT_FILESYSTEM=$'/board_org\n/sapmnt/EPP' +epprd_rg:cl_activate_nfs[293] odmget -q name='EXPORT_FILESYSTEM_V4 AND group=epprd_rg' HACMPresource +epprd_rg:cl_activate_nfs[293] sed -n $'s/^[ \t]*value = "\\(.*\\)"/\\1/p' +epprd_rg:cl_activate_nfs[293] EXPORT_FILESYSTEM_V4='' +epprd_rg:cl_activate_nfs[302] EXPFILE=/usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[304] [[ -z '' ]] 
+epprd_rg:cl_activate_nfs[305] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_activate_nfs[308] VERSION_SOURCE=FILES +epprd_rg:cl_activate_nfs[320] [[ FILES == FILES ]] +epprd_rg:cl_activate_nfs[322] export_v3='' +epprd_rg:cl_activate_nfs[323] export_v4='' +epprd_rg:cl_activate_nfs[330] getline_exports /board_org +epprd_rg:cl_activate_nfs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_activate_nfs[getline_exports:45] line='' +epprd_rg:cl_activate_nfs[getline_exports:45] typeset line +epprd_rg:cl_activate_nfs[getline_exports:46] flag=0 +epprd_rg:cl_activate_nfs[getline_exports:46] typeset -i flag +epprd_rg:cl_activate_nfs[getline_exports:47] fs=/board_org +epprd_rg:cl_activate_nfs[getline_exports:49] [[ -z /board_org ]] +epprd_rg:cl_activate_nfs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_activate_nfs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[getline_exports:56] read -r line +epprd_rg:cl_activate_nfs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_activate_nfs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_activate_nfs[getline_exports:69] (( 1 == 0 )) +epprd_rg:cl_activate_nfs[getline_exports:74] [[ 0 == 1 ]] +epprd_rg:cl_activate_nfs[getline_exports:56] read -r line +epprd_rg:cl_activate_nfs[getline_exports:59] line='/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_activate_nfs[getline_exports:60] line='/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_activate_nfs[getline_exports:63] [[ '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' == #* ]] +epprd_rg:cl_activate_nfs[getline_exports:68] echo '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_activate_nfs[getline_exports:68] grep -q '^[[:space:]]*/board_org[[:space:]]' +epprd_rg:cl_activate_nfs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_activate_nfs[getline_exports:71] flag=1 +epprd_rg:cl_activate_nfs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_activate_nfs[getline_exports:76] echo '/board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_activate_nfs[getline_exports:76] grep -w '\\' +epprd_rg:cl_activate_nfs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_activate_nfs[getline_exports:81] cl_exports_data=' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_activate_nfs[getline_exports:82] break +epprd_rg:cl_activate_nfs[getline_exports:89] return 0 +epprd_rg:cl_activate_nfs[331] export_line=' /board_org -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds' +epprd_rg:cl_activate_nfs[336] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_activate_nfs[336] echo /board_org 
-sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap:epprd:epprda:epprds,root=epprdap:epprd:epprda:epprds +epprd_rg:cl_activate_nfs[337] cut -d- -f2- +epprd_rg:cl_activate_nfs[337] tr , ' ' +epprd_rg:cl_activate_nfs[336] options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap:epprd:epprda:epprds root=epprdap:epprd:epprda:epprds ' +epprd_rg:cl_activate_nfs[343] vers_missing=1 +epprd_rg:cl_activate_nfs[366] (( vers_missing )) +epprd_rg:cl_activate_nfs[366] export_v3=' /board_org' +epprd_rg:cl_activate_nfs[330] getline_exports /sapmnt/EPP +epprd_rg:cl_activate_nfs[getline_exports:44] cl_exports_data='' +epprd_rg:cl_activate_nfs[getline_exports:45] line='' +epprd_rg:cl_activate_nfs[getline_exports:45] typeset line +epprd_rg:cl_activate_nfs[getline_exports:46] flag=0 +epprd_rg:cl_activate_nfs[getline_exports:46] typeset -i flag +epprd_rg:cl_activate_nfs[getline_exports:47] fs=/sapmnt/EPP +epprd_rg:cl_activate_nfs[getline_exports:49] [[ -z /sapmnt/EPP ]] +epprd_rg:cl_activate_nfs[getline_exports:54] [[ -r /usr/es/sbin/cluster/etc/exports ]] +epprd_rg:cl_activate_nfs[getline_exports:56] cat /usr/es/sbin/cluster/etc/exports +epprd_rg:cl_activate_nfs[getline_exports:56] read -r line +epprd_rg:cl_activate_nfs[getline_exports:59] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:60] line='/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:63] [[ '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' == #* ]] +epprd_rg:cl_activate_nfs[getline_exports:68] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:68] grep -q '^[[:space:]]*/sapmnt/EPP[[:space:]]' +epprd_rg:cl_activate_nfs[getline_exports:69] (( 0 == 0 )) +epprd_rg:cl_activate_nfs[getline_exports:71] flag=1 +epprd_rg:cl_activate_nfs[getline_exports:74] [[ 1 == 1 ]] +epprd_rg:cl_activate_nfs[getline_exports:76] echo '/sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:76] grep -w '\\' +epprd_rg:cl_activate_nfs[getline_exports:76] [[ -n '' ]] +epprd_rg:cl_activate_nfs[getline_exports:81] cl_exports_data=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[getline_exports:82] break +epprd_rg:cl_activate_nfs[getline_exports:89] return 0 +epprd_rg:cl_activate_nfs[331] export_line=' /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap' +epprd_rg:cl_activate_nfs[336] echo /sapmnt/EPP -sec=sys:krb5p:krb5i:krb5:dh,rw,access=epprdap,root=epprdap +epprd_rg:cl_activate_nfs[336] awk '{ for (i=2; i<=NF; i++) printf $i " "; print "" }' +epprd_rg:cl_activate_nfs[337] cut -d- -f2- +epprd_rg:cl_activate_nfs[337] tr , ' ' +epprd_rg:cl_activate_nfs[336] options=sec='sys:krb5p:krb5i:krb5:dh rw access=epprdap root=epprdap ' +epprd_rg:cl_activate_nfs[343] vers_missing=1 +epprd_rg:cl_activate_nfs[366] (( vers_missing )) +epprd_rg:cl_activate_nfs[366] export_v3=' /board_org /sapmnt/EPP' +epprd_rg:cl_activate_nfs[369] EXPORT_FILESYSTEM=' /board_org /sapmnt/EPP' +epprd_rg:cl_activate_nfs[370] EXPORT_FILESYSTEM_V4='' +epprd_rg:cl_activate_nfs[377] [[ -x /usr/sbin/nfsrgyd ]] +epprd_rg:cl_activate_nfs[378] [[ -n '' ]] +epprd_rg:cl_activate_nfs[379] grep -q vers=4 /etc/filesystems +epprd_rg:cl_activate_nfs[394] [[ TRUE == TRUE ]] +epprd_rg:cl_activate_nfs[411] filesystem=/board_org +epprd_rg:cl_activate_nfs[412] mountpoint=/board 
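The '/board;/board_org' token carried in FILE_SYSTEMS is cross-mount syntax: the part before the semicolon is the local NFS mount point, the part after it is the filesystem exported over the service address, which is why cl_activate_nfs probes for the pattern with grep -q '\;/' and then splits the pair into mountpoint=/board and filesystem=/board_org. A sketch of the split and the mount it leads to; the explicit mount command is an assumption inferred from the nfs_mount arguments traced below, and hard,intr is the default option set this run ends up with:

    #!/bin/ksh
    # Sketch: split one cross-mount token and mount it from the NFS host.
    HOST=epprd
    pair='/board;/board_org'

    mountpoint=${pair%%;*}                  # /board      (local mount point)
    filesystem=${pair##*;}                  # /board_org  (exported filesystem)

    # The trace calls: nfs_mount 1 epprd /board_org /board
    mount -o hard,intr $HOST:$filesystem $mountpoint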
+epprd_rg:cl_activate_nfs:/board;/board_org[429] PS4_LOOP='/board;/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[430] [[ sequential == sequential ]] +epprd_rg:cl_activate_nfs:/board;/board_org[432] nfs_mount 1 epprd /board_org /board +epprd_rg:cl_activate_nfs(0.127):/board;/board_org[nfs_mount:99] (( 4 != 4 )) +epprd_rg:cl_activate_nfs(0.127):/board;/board_org[nfs_mount:108] LIMIT=1 +epprd_rg:cl_activate_nfs(0.127):/board;/board_org[nfs_mount:108] typeset -li LIMIT +epprd_rg:cl_activate_nfs(0.127):/board;/board_org[nfs_mount:109] HOST=epprd +epprd_rg:cl_activate_nfs(0.127):/board;/board_org[nfs_mount:110] FileSystem=/board_org +epprd_rg:cl_activate_nfs(0.127):/board;/board_org[nfs_mount:111] MountPoint=/board +epprd_rg:cl_activate_nfs(0.128):/board;/board_org[nfs_mount:116] mount +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ mounted == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ --------------- == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ procfs == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ /sapcd == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ ahafs == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest 
+epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.130):/board;/board_org[nfs_mount:119] [[ jfs2 == /board ]] +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:117] read node node_fs lcl_mount rest +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:126] vers='' +epprd_rg:cl_activate_nfs(0.131):/board;/board_org[nfs_mount:127] [[ FILES == ODM ]] +epprd_rg:cl_activate_nfs(0.132):/board;/board_org[nfs_mount:141] lsfs -c -v nfs +epprd_rg:cl_activate_nfs(0.135):/board;/board_org[nfs_mount:141] grep ^/board: +epprd_rg:cl_activate_nfs(0.136):/board;/board_org[nfs_mount:141] cut -d: -f7 +epprd_rg:cl_activate_nfs(0.139):/board;/board_org[nfs_mount:141] OPTIONS='' +epprd_rg:cl_activate_nfs(0.141):/board;/board_org[nfs_mount:142] echo +epprd_rg:cl_activate_nfs(0.142):/board;/board_org[nfs_mount:142] sed s/+/:/g +epprd_rg:cl_activate_nfs(0.145):/board;/board_org[nfs_mount:142] OPTIONS='' +epprd_rg:cl_activate_nfs(0.145):/board;/board_org[nfs_mount:144] [[ -z '' ]] +epprd_rg:cl_activate_nfs(0.145):/board;/board_org[nfs_mount:150] OPTIONS=hard,intr +epprd_rg:cl_activate_nfs(0.145):/board;/board_org[nfs_mount:168] [[ -n '' ]] 
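[Editor's note] What the long read loop above is doing: nfs_mount walks plain `mount` output, comparing the third whitespace field of each line against the target mount point to see whether /board is already mounted (for local filesystems the node column is absent, so that field is the vfs type, hence all the `[[ jfs2 == /board ]]` tests). A hedged ksh sketch of that check plus the option lookup that follows it:

    MountPoint=/board
    mounted=''
    mount | while read node node_fs lcl_mount rest ; do    # ksh runs this loop in the current shell
        [[ "$lcl_mount" == "$MountPoint" ]] && mounted=yes
    done
    OPTIONS=$(lsfs -c -v nfs | grep "^$MountPoint:" | cut -d: -f7)
    [[ -z "$OPTIONS" ]] && OPTIONS=hard,intr               # default when lsfs reports no options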
+epprd_rg:cl_activate_nfs(0.145):/board;/board_org[nfs_mount:175] [[ sequential == sequential ]] +epprd_rg:cl_activate_nfs(0.147):/board;/board_org[nfs_mount:177] print hard,intr +epprd_rg:cl_activate_nfs(0.148):/board;/board_org[nfs_mount:177] sed s/bg/fg/g +epprd_rg:cl_activate_nfs(0.151):/board;/board_org[nfs_mount:177] OPTIONS=hard,intr +epprd_rg:cl_activate_nfs(0.151):/board;/board_org[nfs_mount:178] let LIMIT+=4 +epprd_rg:cl_activate_nfs(0.151):/board;/board_org[nfs_mount:184] typeset RC +epprd_rg:cl_activate_nfs(0.151):/board;/board_org[nfs_mount:186] amlog_trace '' 'Activating NFS|/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:319] cltime +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:319] DATE=2023-09-30T03:25:33.194985 +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:320] echo '|2023-09-30T03:25:33.194985|INFO: Activating NFS|/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_nfs(0.180):/board;/board_org[nfs_mount:187] (( TRIES=0)) +epprd_rg:cl_activate_nfs(0.180):/board;/board_org[nfs_mount:187] (( TRIES<LIMIT )) +epprd_rg:cl_activate_nfs:/board;/board_org[nfs_mount] amlog_trace '' 'Activating NFS|/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:318] clcycle clavailability.log +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:319] cltime +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:319] DATE=2023-09-30T03:25:33.264968 +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:320] echo '|2023-09-30T03:25:33.264968|INFO: Activating NFS|/board_org' +epprd_rg:cl_activate_nfs:/board;/board_org[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:cl_activate_nfs(0.250):/board;/board_org[nfs_mount:203] return 0 +epprd_rg:process_resources(18.441)[mount_nfs_filesystems:1540] RC=0 +epprd_rg:process_resources(18.441)[mount_nfs_filesystems:1541] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources(18.441)[mount_nfs_filesystems:1549] (( 0 != 0 )) +epprd_rg:process_resources(18.441)[mount_nfs_filesystems:1565] return 0 +epprd_rg:process_resources(18.441)[3324] true +epprd_rg:process_resources(18.441)[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources(18.441)[3328] set -a +epprd_rg:process_resources(18.441)[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-09-30T03:25:33.277856 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources(18.454)[3329] eval JOB_TYPE=NONE +epprd_rg:process_resources(18.454)[1] JOB_TYPE=NONE +epprd_rg:process_resources(18.454)[3330] RC=0 +epprd_rg:process_resources(18.454)[3331] set +a +epprd_rg:process_resources(18.454)[3333] (( 0 != 0 )) +epprd_rg:process_resources(18.454)[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources(18.454)[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources(18.454)[3343] export GROUPNAME +epprd_rg:process_resources(18.454)[3353] IS_SERVICE_START=1 +epprd_rg:process_resources(18.454)[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources(18.454)[3360] [[ NONE == RELEASE ]] +epprd_rg:process_resources(18.454)[3360] [[ NONE == ONLINE ]]
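[Editor's note] Because the recovery method is sequential, nfs_mount rewrites any background-mount option to foreground, raises its retry limit, and brackets the attempt with entries in the availability log; the second amlog_trace pair at 03:25:33.264968 together with return 0 and RC=0 indicates the mount succeeded. A sketch of that shape, with date standing in for cltime (an assumption; cltime's exact format is taken from the trace):

    OPTIONS=$(print "$OPTIONS" | sed s/bg/fg/g)    # fg so a failure surfaces immediately
    let LIMIT+=4                                   # a few extra tries for a sequential mount
    DATE=$(date +%Y-%m-%dT%H:%M:%S)
    echo "|$DATE|INFO: Activating NFS|/board_org" >> /var/hacmp/availability/clavailability.log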
+epprd_rg:process_resources(18.454)[3729] break +epprd_rg:process_resources(18.454)[3740] : If sddsrv was turned off above, turn it back on again +epprd_rg:process_resources(18.454)[3742] [[ FALSE == TRUE ]] +epprd_rg:process_resources(18.454)[3747] exit 0 :rg_move[247] : unsetting AM_SYNC_CALLED_BY from $'callers environment as\n: we dont' require it after this point in execution. :rg_move[250] unset AM_SYNC_CALLED_BY :rg_move[253] [[ -f /tmp/.NFSSTOPPED ]] :rg_move[274] [[ -f /tmp/.RPCLOCKDSTOPPED ]] :rg_move[276] rm -f /tmp/.RPCLOCKDSTOPPED :rg_move[277] ATTEMPT=0 :rg_move[277] typeset -li ATTEMPT :rg_move[278] (( ATTEMPT++ < 60 )) :rg_move[280] : rpc.lockd status check :rg_move[281] lssrc -s rpc.lockd :rg_move[281] LC_ALL=C :rg_move[281] grep stopping :rg_move[282] (( 1 == 0 )) :rg_move[282] break :rg_move[285] startsrc -s rpc.lockd 0513-059 The rpc.lockd Subsystem has been started. Subsystem PID is 20381966. :rg_move[286] rcstartsrc=0 :rg_move[287] (( 0 != 0 )) :rg_move[293] exit 0 Sep 30 2023 03:25:33 EVENT COMPLETED: rg_move epprda 1 ACQUIRE 0 |2023-09-30T03:25:33|18770|EVENT COMPLETED: rg_move epprda 1 ACQUIRE 0| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-09-30T03:25:33.395022 :clevlog[amlog_trace:320] echo '|2023-09-30T03:25:33.395022|INFO: rg_move|epprd_rg|epprda|1|ACQUIRE|0' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :rg_move_acquire[+119] exit_status=0 :rg_move_acquire[+120] : exit status of clcallev rg_move epprda 1 ACQUIRE is: 0 :rg_move_acquire[+121] exit 0 Sep 30 2023 03:25:33 EVENT COMPLETED: rg_move_acquire epprda 1 0 |2023-09-30T03:25:33|18770|EVENT COMPLETED: rg_move_acquire epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-30T03:25:33.519065 + echo '|2023-09-30T03:25:33.519065|INFO: rg_move_acquire|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log Sep 30 2023 03:25:33 EVENT START: rg_move_complete epprda 1 |2023-09-30T03:25:33|18770|EVENT START: rg_move_complete epprda 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-30T03:25:33.714161 + echo '|2023-09-30T03:25:33.714161|INFO: rg_move_complete|epprd_rg|epprda|1' + 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. 
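[Editor's note] The rpc.lockd handling in rg_move above (and again in rg_move_complete below) is a poll-then-restart pattern: wait until SRC no longer reports the subsystem as stopping, then start it fresh so NFS clients reclaim their locks against the new server. A minimal ksh sketch:

    ATTEMPT=0
    while (( ATTEMPT++ < 60 )) ; do
        # wait while SRC still reports the subsystem in 'stopping' state
        LC_ALL=C lssrc -s rpc.lockd | grep -q stopping || break
        sleep 1
    done
    startsrc -s rpc.lockd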
:get_local_nodename[63] clnodename :get_local_nodename[63] grep -w epprda :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move_complete[91] version=%I% :rg_move_complete[97] STATUS=0 :rg_move_complete[97] typeset -li STATUS :rg_move_complete[99] [[ -z '' ]] :rg_move_complete[101] EMULATE=REAL :rg_move_complete[104] set -u :rg_move_complete[106] (( 2 < 2 || 2 > 3 )) :rg_move_complete[112] NODENAME=epprda :rg_move_complete[112] export NODENAME :rg_move_complete[113] RGID=1 :rg_move_complete[114] (( 2 == 3 )) :rg_move_complete[118] RGDESTINATION='' :rg_move_complete[122] : serial number for this event is 18770 :rg_move_complete[126] : Interpret resource group ID into a resource group name. :rg_move_complete[128] clodmget -qid=1 -f group -n HACMPgroup :rg_move_complete[128] eval RGNAME=epprd_rg :rg_move_complete[1] RGNAME=epprd_rg +epprd_rg:rg_move_complete[129] GROUPNAME=epprd_rg +epprd_rg:rg_move_complete[131] UPDATESTATD=0 +epprd_rg:rg_move_complete[131] typeset -li UPDATESTATD +epprd_rg:rg_move_complete[132] NFSSTOPPED=0 +epprd_rg:rg_move_complete[132] typeset -li NFSSTOPPED +epprd_rg:rg_move_complete[133] LIMIT=60 +epprd_rg:rg_move_complete[133] WAIT=1 +epprd_rg:rg_move_complete[133] TRY=0 +epprd_rg:rg_move_complete[133] typeset -li LIMIT WAIT TRY +epprd_rg:rg_move_complete[136] : If this is a two node cluster and exported filesystems exist, then +epprd_rg:rg_move_complete[137] : when the cluster topology is stable notify rpc.statd of the changes. +epprd_rg:rg_move_complete[139] clnodename +epprd_rg:rg_move_complete[139] wc -l +epprd_rg:rg_move_complete[139] (( 2 == 2 )) +epprd_rg:rg_move_complete[141] clodmget -f group -n HACMPgroup +epprd_rg:rg_move_complete[141] RESOURCE_GROUPS=epprd_rg +epprd_rg:rg_move_complete[144] clodmget -q group='epprd_rg AND name=EXPORT_FILESYSTEM' -f value -n HACMPresource +epprd_rg:rg_move_complete[144] EXPORTLIST=$'/board_org\n/sapmnt/EPP' +epprd_rg:rg_move_complete[146] [[ -n $'/board_org\n/sapmnt/EPP' ]] +epprd_rg:rg_move_complete[146] [[ epprd_rg == epprd_rg ]] +epprd_rg:rg_move_complete[148] UPDATESTATD=1 +epprd_rg:rg_move_complete[149] [[ REAL == EMUL ]] +epprd_rg:rg_move_complete[154] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+196] rpcinfo -p :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw status :cl_update_statd(0)[+207] : Get the current twin, if there is one :cl_update_statd(0)[+209] :cl_update_statd(0)[+209] nfso -H sm_gethost :cl_update_statd(0)[+209] 2>& 1 CURTWIN= :cl_update_statd(0)[+210] RC=0 :cl_update_statd(0)[+212] [[ -z true ]] :cl_update_statd(0)[+212] [[ -z ]] :cl_update_statd(0)[+215] : Local node is no longer a cluster member, unregister its twin :cl_update_statd(0)[+215] [[ -n ]] :cl_update_statd(0)[+259] : RC is actually 0 :cl_update_statd(0)[+266] return 0 +epprd_rg:rg_move_complete[155] (( 0 != 0 )) +epprd_rg:rg_move_complete[160] break +epprd_rg:rg_move_complete[166] : Set the RESOURCE_GROUPS environment variable with the 
names +epprd_rg:rg_move_complete[167] : of all resource groups participating in this event, and export +epprd_rg:rg_move_complete[168] : them to all successive scripts. +epprd_rg:rg_move_complete[170] set -a +epprd_rg:rg_move_complete[171] clsetenvgrp epprda rg_move_complete epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move_complete epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_complete[171] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_complete[172] RC=0 +epprd_rg:rg_move_complete[173] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_complete[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_complete[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_complete[3] HOMELESS_GROUPS='' +epprd_rg:rg_move_complete[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_complete[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_complete[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_complete[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_complete[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_complete[8] SIBLING_GROUPS='' +epprd_rg:rg_move_complete[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_complete[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_complete[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[174] set +a +epprd_rg:rg_move_complete[175] (( 0 != 0 )) +epprd_rg:rg_move_complete[182] : For each participating resource group, serially process the resources. +epprd_rg:rg_move_complete[251] (( 1 == 1 )) +epprd_rg:rg_move_complete[253] [[ REAL == EMUL ]] +epprd_rg:rg_move_complete[259] stopsrc -s rpc.lockd 0513-044 The rpc.lockd Subsystem was requested to stop. +epprd_rg:rg_move_complete[260] rcstopsrc=0 +epprd_rg:rg_move_complete[261] (( 0 != 0 )) +epprd_rg:rg_move_complete[266] (( TRY=0)) +epprd_rg:rg_move_complete[266] (( 0<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 1<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! 
-z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 2<60)) +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 3<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 4<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 5<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 6<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 7<60)) +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 8<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 9<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 10<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! 
-z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 11<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 12<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 13<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 14<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 15<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 16<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 17<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 18<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 19<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! 
-z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 20<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z '' ]] +epprd_rg:rg_move_complete[273] break +epprd_rg:rg_move_complete[277] [[ ! -z '' ]] +epprd_rg:rg_move_complete[300] : Sure that rpc.lockd stopped. Restart it. +epprd_rg:rg_move_complete[302] startsrc -s rpc.lockd 0513-059 The rpc.lockd Subsystem has been started. Subsystem PID is 17826132. +epprd_rg:rg_move_complete[303] rcstartsrc=0 +epprd_rg:rg_move_complete[304] (( 0 != 0 )) +epprd_rg:rg_move_complete[365] : If the resource group in this rg_move is now homeless, +epprd_rg:rg_move_complete[366] : then we need to put it into an error state. +epprd_rg:rg_move_complete[368] active_node=0 +epprd_rg:rg_move_complete[428] : If the resource group in this rg_move is now homeless_secondary, +epprd_rg:rg_move_complete[429] : then we need to put it into an errorsecondary state. +epprd_rg:rg_move_complete[437] : Set an error state for concurrent groups that have +epprd_rg:rg_move_complete[438] : been brought offline on this node by rg_move. +epprd_rg:rg_move_complete[453] AM_SYNC_CALLED_BY=RG_MOVE_COMPLETE +epprd_rg:rg_move_complete[453] export AM_SYNC_CALLED_BY +epprd_rg:rg_move_complete[454] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-09-30T03:25:54.020283 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=SYNC_VGS ACTION=ACQUIRE VOLUME_GROUPS='"datavg"' RESOURCE_GROUPS='"epprd_rg' '"' :process_resources[1] JOB_TYPE=SYNC_VGS :process_resources[1] ACTION=ACQUIRE :process_resources[1] VOLUME_GROUPS=datavg :process_resources[1] RESOURCE_GROUPS='epprd_rg ' :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 )) :process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ SYNC_VGS == RELEASE ]] +epprd_rg:process_resources[3360] [[ SYNC_VGS == ONLINE ]] +epprd_rg:process_resources[3474] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[3476] sync_volume_groups +epprd_rg:process_resources[sync_volume_groups:2699] PS4_FUNC=sync_volume_groups +epprd_rg:process_resources[sync_volume_groups:2699] typeset PS4_FUNC +epprd_rg:process_resources[sync_volume_groups:2700] [[ high == high ]] +epprd_rg:process_resources[sync_volume_groups:2700] set -x +epprd_rg:process_resources[sync_volume_groups:2701] STAT=0 +epprd_rg:process_resources[sync_volume_groups:2704] export GROUPNAME +epprd_rg:process_resources[sync_volume_groups:2706] get_list_head datavg +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC 
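[Editor's note] Everything process_resources does is driven by clRGPA: the helper prints shell assignments (JOB_TYPE, ACTION and friends), the caller evals them with allexport enabled, then branches on JOB_TYPE; the SYNC_VGS branch lands in sync_volume_groups, which only syncs volume groups that are both currently active and listed for the group. A schematic ksh sketch of both steps, keeping the comm -12 idiom from the trace:

    sync_volume_groups()
    {
        lsvg -L -o 2> /tmp/lsvg.err | sort > /tmp/lsvg.out.$$    # currently active VGs
        echo datavg | tr ' ' '\n' | sort | comm -12 /tmp/lsvg.out.$$ - | while read vg ; do
            cl_sync_vgs "$vg"    # the interleaved output below suggests this runs in background
        done
        rm -f /tmp/lsvg.out.$$ /tmp/lsvg.err
    }
    while true ; do
        set -a                   # auto-export whatever the eval assigns
        eval $(clRGPA)           # e.g. JOB_TYPE=SYNC_VGS ACTION=ACQUIRE VOLUME_GROUPS="datavg"
        set +a
        case $JOB_TYPE in
            NONE)         break ;;    # nothing left for this event
            SYNC_VGS)     [[ $ACTION == ACQUIRE ]] && sync_volume_groups ;;
            APPLICATIONS) process_applications $ACTION ;;
        esac
    done

Here the first answer is SYNC_VGS/ACQUIRE for datavg; the next, further down, is APPLICATIONS/ACQUIRE for epprd_app.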
+epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo datavg +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo datavg +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[sync_volume_groups:2706] read LIST_OF_VOLUME_GROUPS_FOR_RG +epprd_rg:process_resources[sync_volume_groups:2707] get_list_tail datavg +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo datavg +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[sync_volume_groups:2707] read VOLUME_GROUPS +epprd_rg:process_resources[sync_volume_groups:2710] : Sync the active volume groups +epprd_rg:process_resources[sync_volume_groups:2712] lsvg -L -o +epprd_rg:process_resources[sync_volume_groups:2712] 2> /tmp/lsvg.err +epprd_rg:process_resources[sync_volume_groups:2712] sort +epprd_rg:process_resources[sync_volume_groups:2712] 1> /tmp/lsvg.out.20381972 +epprd_rg:process_resources[sync_volume_groups:2713] echo datavg +epprd_rg:process_resources[sync_volume_groups:2713] tr ' ' '\n' +epprd_rg:process_resources[sync_volume_groups:2714] sort +epprd_rg:process_resources[sync_volume_groups:2714] comm -12 /tmp/lsvg.out.20381972 - +epprd_rg:process_resources[sync_volume_groups:2716] cl_sync_vgs datavg +epprd_rg:process_resources[sync_volume_groups:2718] [[ -s /tmp/lsvg.err ]] +epprd_rg:process_resources[sync_volume_groups:2723] rm -f /tmp/lsvg.out.20381972 /tmp/lsvg.err +epprd_rg:process_resources[sync_volume_groups:2732] unset AM_SYNC_CALLED_BY +epprd_rg:process_resources[sync_volume_groups:2734] return 0 +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:cl_sync_vgs[303] version=1.24.1.4 +epprd_rg:cl_sync_vgs[306] (( 1 == 0 )) +epprd_rg:cl_sync_vgs[312] : syncing 4 stale PPs at a time seems to be a win most of the time, but +epprd_rg:cl_sync_vgs[313] : we honor the NUM_PARALLEL_LPS value from /etc/environment, as does +epprd_rg:cl_sync_vgs[314] : syncvg. +epprd_rg:cl_sync_vgs[316] syncflag='' +epprd_rg:cl_sync_vgs[316] export syncflag +epprd_rg:cl_sync_vgs[317] PS4_LOOP='' +epprd_rg:cl_sync_vgs[317] export PS4_LOOP +epprd_rg:cl_sync_vgs[318] typeset -i npl +epprd_rg:cl_sync_vgs[319] grep -q ^NUM_PARALLEL_LPS= /etc/environment +epprd_rg:cl_sync_vgs[321] syncflag=-P4 +epprd_rg:cl_sync_vgs[328] echo 'NOTE: While the sync is going on, volume group can be used' NOTE: While the sync is going on, volume group can be used +epprd_rg:cl_sync_vgs[331] : For GLVM volume groups, read PARALLEL LPS count from HACMPresource if it is set from GUI, +epprd_rg:cl_sync_vgs[332] : else, read from environment variables, if it is not set use 32 as default value. 
+epprd_rg:cl_sync_vgs[334] clodmget -q name='GMVG_REP_RESOURCE and value=datavg' -f group HACMPresource +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa +epprd_rg:cl_sync_vgs[334] 2> /dev/null 2023-09-30T03:25:54.083494 clrgpa +epprd_rg:cl_sync_vgs[334] glvm_rg='' +epprd_rg:cl_sync_vgs[335] [[ -n '' ]] +epprd_rg:cl_sync_vgs[353] check_sync datavg +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:76] typeset vg_name +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:77] typeset vgid +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:78] typeset disklist +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:79] typeset lv_name +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:80] typeset -li stale_count +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:81] typeset -li mode +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:82] RC=0 +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:82] typeset -li RC +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:83] typeset site_node_list +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:84] typeset site_choice +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:86] vg_name=datavg +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:87] disklist='' +epprd_rg:cl_sync_vgs(0.017):datavg[check_sync:89] getlvodm -v datavg +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=APPLICATIONS ACTION=ACQUIRE ALL_APPLICATIONS='"epprd_app"' RESOURCE_GROUPS='"epprd_rg' '"' MISCDATA='""' +epprd_rg:process_resources[1] JOB_TYPE=APPLICATIONS +epprd_rg:process_resources[1] ACTION=ACQUIRE +epprd_rg:process_resources[1] ALL_APPLICATIONS=epprd_app +epprd_rg:process_resources[1] RESOURCE_GROUPS='epprd_rg ' +epprd_rg:process_resources[1] MISCDATA='' +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ APPLICATIONS == RELEASE ]] +epprd_rg:process_resources[3360] [[ APPLICATIONS == ONLINE ]] +epprd_rg:process_resources[3549] process_applications ACQUIRE +epprd_rg:process_resources[process_applications:312] PS4_FUNC=process_applications +epprd_rg:process_resources[process_applications:312] typeset PS4_FUNC +epprd_rg:process_resources[process_applications:313] [[ high == high ]] +epprd_rg:process_resources[process_applications:313] set -x +epprd_rg:process_resources[process_applications:316] : Each subprocess will log to a file with this name and PID +epprd_rg:process_resources[process_applications:318] TMP_FILE=/var/hacmp/log/.process_resources_applications.20381972 +epprd_rg:process_resources[process_applications:318] export TMP_FILE +epprd_rg:process_resources[process_applications:320] rm -f '/var/hacmp/log/.process_resources_applications*' +epprd_rg:cl_sync_vgs(0.020):datavg[check_sync:89] vgid=00c44af100004b00000001851e9dc053 +epprd_rg:cl_sync_vgs(0.020):datavg[check_sync:92] : find disks in the VG that LVM thinks are inaccessable +epprd_rg:cl_sync_vgs(0.020):datavg[check_sync:94] lsvg -L -p datavg +epprd_rg:cl_sync_vgs(0.021):datavg[check_sync:94] LC_ALL=C +epprd_rg:process_resources[process_applications:322] WAITPIDS='' 
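[Editor's note] check_sync, whose output is interleaved with process_applications from here on, makes two passes over the volume group. First it scans lsvg -p for physical volumes LVM flags as missing or removed; then, in the LV loop further down, it walks `lqueryvg -g <vgid> -L` output, one 'lvid lvname status' line per logical volume, where only a status of 2, 3, 5 or 7 marks dirty or stale copies. A hedged ksh sketch of both tests, with syncvg as the assumed follow-up (nothing qualifies in this log: all seven hdisks are active and every LV reports status 1):

    disklist=$(LC_ALL=C lsvg -L -p datavg)
    missing_disklist=$(print -- "$disklist" | grep -w missing | cut -f1 '-d ')
    removed_disklist=$(print -- "$disklist" | grep -w removed | cut -f1 '-d ')
    lqueryvg -g 00c44af100004b00000001851e9dc053 -L | while read lv_id lv_name lv_status ; do
        # any other status means no stale partitions for this LV
        (( lv_status != 2 && lv_status != 3 && lv_status != 5 && lv_status != 7 )) && continue
        syncvg -P4 -l "$lv_name"    # assumed action: sync stale copies, 4 LPs in parallel as set above
    done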
+epprd_rg:process_resources[process_applications:323] LPAR_ACQUIRE_FAILED=0 +epprd_rg:process_resources[process_applications:324] LPAR_RELEASE_FAILED=0 +epprd_rg:process_resources[process_applications:325] START_STOP_FAILED=0 +epprd_rg:process_resources[process_applications:326] LIST_OF_APPS=epprd_app +epprd_rg:process_resources[process_applications:329] : Acquire lpar resources in one-shot before starting applications +epprd_rg:process_resources[process_applications:331] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[process_applications:333] GROUPNAME=epprd_rg +epprd_rg:process_resources[process_applications:333] export GROUPNAME +epprd_rg:process_resources[process_applications:334] clmanageroha -o acquire -s -l epprd_app +epprd_rg:process_resources[process_applications:334] 3>& 2 +epprd_rg:cl_sync_vgs(0.039):datavg[check_sync:94] disklist=$'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.041):datavg[check_sync:95] print -- $'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.043):datavg[check_sync:95] grep -w missing +epprd_rg:cl_sync_vgs(0.044):datavg[check_sync:95] cut -f1 '-d ' +epprd_rg:cl_sync_vgs(0.048):datavg[check_sync:95] missing_disklist='' +epprd_rg:cl_sync_vgs(0.049):datavg[check_sync:96] print -- $'datavg:\nPV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION\nhdisk2 active 199 89 40..00..00..09..40\nhdisk3 active 199 89 40..00..00..09..40\nhdisk4 active 199 88 40..00..00..08..40\nhdisk5 active 199 89 40..00..00..09..40\nhdisk6 active 199 89 40..00..00..09..40\nhdisk7 active 199 89 40..00..00..09..40\nhdisk8 active 199 89 40..00..00..09..40' +epprd_rg:cl_sync_vgs(0.055):datavg[check_sync:96] grep -w removed +epprd_rg:cl_sync_vgs(0.062):datavg[check_sync:96] cut -f1 '-d ' +epprd_rg:cl_sync_vgs(0.069):datavg[check_sync:96] removed_disklist='' +epprd_rg:cl_sync_vgs(0.069):datavg[check_sync:100] : Proceeed if there are some disks that LVM thinks are inaccessable +epprd_rg:cl_sync_vgs(0.069):datavg[check_sync:102] [[ -n '' ]] +epprd_rg:cl_sync_vgs(0.069):datavg[check_sync:102] [[ -n '' ]] +epprd_rg:cl_sync_vgs(0.069):datavg[check_sync:196] : sync if any LVs in the VG that have stale partitions +epprd_rg:cl_sync_vgs(0.069):datavg[check_sync:198] (( 0 == 0 )) +epprd_rg:cl_sync_vgs(0.069):datavg[check_sync:201] : A status of 2,3,5 or 7 indicates the presence of dirty and/or stale partitions +epprd_rg:cl_sync_vgs(0.069):datavg[check_sync:213] is_start_logged=0 +epprd_rg:cl_sync_vgs(0.069):datavg[check_sync:218] at_least_one_sync_success=0 +epprd_rg:cl_sync_vgs(0.070):datavg[check_sync:219] lqueryvg -g 00c44af100004b00000001851e9dc053 -L +epprd_rg:clmanageroha[318] : version='@(#)' 5881272 43haes/usr/sbin/cluster/events/clmanageroha.sh, 61aha_r726, 2205A_aha726, May 16 2022 12:15 PM +epprd_rg:clmanageroha[321] clodmget -n -f connection_type HACMPhmcparam +epprd_rg:clmanageroha[321] CONN_TYPE=0 +epprd_rg:clmanageroha[321] typeset -i 
CONN_TYPE +epprd_rg:clmanageroha[323] clodmget -q name='epprda and object like POWERVS_*' -nf name HACMPnode +epprd_rg:clmanageroha[323] 2> /dev/null +epprd_rg:clmanageroha[323] [[ -n '' ]] +epprd_rg:clmanageroha[326] export CONN_TYPE +epprd_rg:clmanageroha[331] roha_session_open -o acquire -s -l epprd_app +epprd_rg:clmanageroha[roha_session_open:131] roha_session.id=19923392 +epprd_rg:clmanageroha[roha_session_open:132] date +epprd_rg:clmanageroha[roha_session_open:132] LC_ALL=C +epprd_rg:cl_sync_vgs(0.072):datavg[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.095):datavg[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.095):datavg.epprdaloglv[check_sync:221] PS4_LOOP=datavg.epprdaloglv +epprd_rg:cl_sync_vgs(0.095):datavg.epprdaloglv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.095):datavg.epprdaloglv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.095):datavg.epprdaloglv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.095):datavg.epprdaloglv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.095):datavg.epprdaloglv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.095):datavg.saplv[check_sync:221] PS4_LOOP=datavg.saplv +epprd_rg:cl_sync_vgs(0.095):datavg.saplv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.095):datavg.saplv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.095):datavg.saplv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.095):datavg.saplv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.095):datavg.saplv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.095):datavg.sapmntlv[check_sync:221] PS4_LOOP=datavg.sapmntlv +epprd_rg:cl_sync_vgs(0.095):datavg.sapmntlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.095):datavg.sapmntlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.095):datavg.sapmntlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.095):datavg.sapmntlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.095):datavg.sapmntlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.095):datavg.oraclelv[check_sync:221] PS4_LOOP=datavg.oraclelv +epprd_rg:cl_sync_vgs(0.095):datavg.oraclelv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.095):datavg.oraclelv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.095):datavg.oraclelv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.095):datavg.oraclelv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.095):datavg.oraclelv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.095):datavg.epplv[check_sync:221] PS4_LOOP=datavg.epplv +epprd_rg:cl_sync_vgs(0.096):datavg.epplv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.096):datavg.epplv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.096):datavg.epplv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.096):datavg.epplv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.096):datavg.epplv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.096):datavg.oraarchlv[check_sync:221] PS4_LOOP=datavg.oraarchlv +epprd_rg:cl_sync_vgs(0.096):datavg.oraarchlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) 
+epprd_rg:cl_sync_vgs(0.096):datavg.oraarchlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.096):datavg.oraarchlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.096):datavg.oraarchlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.096):datavg.oraarchlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata1lv[check_sync:221] PS4_LOOP=datavg.sapdata1lv +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata1lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata1lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata1lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata1lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata1lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata2lv[check_sync:221] PS4_LOOP=datavg.sapdata2lv +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata2lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata2lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata2lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata2lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata2lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata3lv[check_sync:221] PS4_LOOP=datavg.sapdata3lv +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata3lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata3lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata3lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata3lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata3lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata4lv[check_sync:221] PS4_LOOP=datavg.sapdata4lv +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata4lv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata4lv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata4lv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata4lv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.096):datavg.sapdata4lv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.096):datavg.boardlv[check_sync:221] PS4_LOOP=datavg.boardlv +epprd_rg:cl_sync_vgs(0.096):datavg.boardlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.096):datavg.boardlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.096):datavg.boardlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.096):datavg.boardlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.096):datavg.boardlv[check_sync:221] [[ high == high ]] +epprd_rg:clmanageroha[roha_session_open:132] roha_session_log 'Open session 19923392 at Sat Sep 30 03:25:54 KORST 2023' +epprd_rg:cl_sync_vgs(0.096):datavg.origlogAlv[check_sync:221] PS4_LOOP=datavg.origlogAlv +epprd_rg:cl_sync_vgs(0.096):datavg.origlogAlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.096):datavg.origlogAlv[check_sync:225] : Anything else indicates no stale partitions
+epprd_rg:cl_sync_vgs(0.096):datavg.origlogAlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.096):datavg.origlogAlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.096):datavg.origlogAlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.096):datavg.origlogBlv[check_sync:221] PS4_LOOP=datavg.origlogBlv +epprd_rg:cl_sync_vgs(0.096):datavg.origlogBlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.096):datavg.origlogBlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.096):datavg.origlogBlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.096):datavg.origlogBlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.096):datavg.origlogBlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.096):datavg.mirrlogAlv[check_sync:221] PS4_LOOP=datavg.mirrlogAlv +epprd_rg:cl_sync_vgs(0.096):datavg.mirrlogAlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.096):datavg.mirrlogAlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.096):datavg.mirrlogAlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.096):datavg.mirrlogAlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.096):datavg.mirrlogAlv[check_sync:221] [[ high == high ]] +epprd_rg:cl_sync_vgs(0.096):datavg.mirrlogBlv[check_sync:221] PS4_LOOP=datavg.mirrlogBlv +epprd_rg:cl_sync_vgs(0.096):datavg.mirrlogBlv[check_sync:222] (( 1 != 2 && 1 != 3 && 1 != 5 && 1 != 7 )) +epprd_rg:cl_sync_vgs(0.096):datavg.mirrlogBlv[check_sync:225] : Anything else indicates no stale partitions +epprd_rg:cl_sync_vgs(0.096):datavg.mirrlogBlv[check_sync:227] continue +epprd_rg:cl_sync_vgs(0.096):datavg.mirrlogBlv[check_sync:219] read lv_id lv_name lv_status +epprd_rg:cl_sync_vgs(0.096):datavg.mirrlogBlv[check_sync:268] [[ -n RG_MOVE_COMPLETE ]] +epprd_rg:cl_sync_vgs(0.096):datavg.mirrlogBlv[check_sync:268] (( 0 == 1 )) +epprd_rg:cl_sync_vgs[355] exit 0 [ROHALOG:19923392:(0.072)] Open session 19923392 at Sat Sep 30 03:25:54 KORST 2023 +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:146] roha_session.operation=acquire +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:143] roha_session.systemmirror_mode=1 +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:149] roha_session.optimal_apps=epprd_app +epprd_rg:clmanageroha[roha_session_open:137] getopts :cso:l:t opt +epprd_rg:clmanageroha[roha_session_open:163] [[ acquire != @(acquire|release|adjust) ]] +epprd_rg:clmanageroha[roha_session_open:168] no_roha_apps=0 +epprd_rg:clmanageroha[roha_session_open:168] typeset -i no_roha_apps +epprd_rg:clmanageroha[roha_session_open:169] need_explicit_res_rel=0 +epprd_rg:clmanageroha[roha_session_open:169] typeset -i need_explicit_res_rel +epprd_rg:clmanageroha[roha_session_open:187] [[ -n epprd_app ]] +epprd_rg:clmanageroha[roha_session_open:187] clmgr q roha +epprd_rg:clmanageroha[roha_session_open:187] sort +epprd_rg:clmanageroha[roha_session_open:187] uniq -d +epprd_rg:clmanageroha[roha_session_open:187] sort -u +epprd_rg:clmanageroha[roha_session_open:187] echo epprd_app +epprd_rg:clmanageroha[roha_session_open:187] echo '\nepprd_app' +epprd_rg:clmanageroha[roha_session_open:187] [[ -z '' ]] +epprd_rg:clmanageroha[roha_session_open:189] roha_session_log 'INFO: No ROHA configured on applications.\n' 
[ROHALOG:19923392:(0.518)] INFO: No ROHA configured on applications. [ROHALOG:19923392:(0.518)] +epprd_rg:clmanageroha[roha_session_open:190] no_roha_apps=1 +epprd_rg:clmanageroha[roha_session_open:195] read_tunables +epprd_rg:clmanageroha[roha_session_open:196] echo '' +epprd_rg:clmanageroha[roha_session_open:196] grep -q epprda +epprd_rg:clmanageroha[roha_session_open:197] (( 1 == 0 )) +epprd_rg:clmanageroha[roha_session_open:202] (( 1 == 1 )) +epprd_rg:clmanageroha[roha_session_open:203] roha_session_read_odm_dynresop DLPAR_MEM +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_MEM -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:203] (( 0 == 0.00 )) +epprd_rg:clmanageroha[roha_session_open:204] roha_session_read_odm_dynresop DLPAR_PROCS +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROCS -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:204] (( 0 == 0 )) +epprd_rg:clmanageroha[roha_session_open:205] roha_session_read_odm_dynresop DLPAR_PROC_UNITS +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] clodmget -q key=DLPAR_PROC_UNITS -nf value HACMPdynresop +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] ODMDIR=/etc/es/objrepos +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:816] out='' +epprd_rg:clmanageroha[roha_session_read_odm_dynresop:817] print -- 0 +epprd_rg:clmanageroha[roha_session_open:205] (( 0 == 0.00 )) +epprd_rg:clmanageroha[roha_session_open:206] roha_session_log 'INFO: Nothing to be done.\n' [ROHALOG:19923392:(0.575)] INFO: Nothing to be done. 
[ROHALOG:19923392:(0.575)] +epprd_rg:clmanageroha[roha_session_open:207] exit 0 +epprd_rg:process_resources[process_applications:335] RC=0 +epprd_rg:process_resources[process_applications:336] (( 0 != 0 )) +epprd_rg:process_resources[process_applications:343] (( LPAR_ACQUIRE_FAILED == 0 )) +epprd_rg:process_resources[process_applications:345] : Loop through all groups to start or stop applications +epprd_rg:process_resources[process_applications:348] export GROUPNAME +epprd_rg:process_resources[process_applications:351] : Break out application data +epprd_rg:process_resources[process_applications:353] get_list_head epprd_app +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo epprd_app +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo epprd_app +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[process_applications:353] read LIST_OF_APPLICATIONS_FOR_RG +epprd_rg:process_resources[process_applications:354] get_list_tail epprd_app +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo epprd_app +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[process_applications:354] read ALL_APPLICATIONS +epprd_rg:process_resources[process_applications:356] get_list_head +epprd_rg:process_resources[get_list_head:59] PS4_FUNC=get_list_head +epprd_rg:process_resources[get_list_head:59] typeset PS4_FUNC +epprd_rg:process_resources[get_list_head:60] [[ high == high ]] +epprd_rg:process_resources[get_list_head:60] set -x +epprd_rg:process_resources[get_list_head:61] echo +epprd_rg:process_resources[get_list_head:61] read listhead listtail +epprd_rg:process_resources[get_list_head:61] IFS=: +epprd_rg:process_resources[get_list_head:62] echo +epprd_rg:process_resources[get_list_head:62] tr , ' ' +epprd_rg:process_resources[process_applications:356] read MISCDATA_FOR_RG +epprd_rg:process_resources[process_applications:357] get_list_tail +epprd_rg:process_resources[get_list_tail:67] PS4_FUNC=get_list_tail +epprd_rg:process_resources[get_list_tail:67] typeset PS4_FUNC +epprd_rg:process_resources[get_list_tail:68] [[ high == high ]] +epprd_rg:process_resources[get_list_tail:68] set -x +epprd_rg:process_resources[get_list_tail:69] echo +epprd_rg:process_resources[get_list_tail:69] read listhead listtail +epprd_rg:process_resources[get_list_tail:69] IFS=: +epprd_rg:process_resources[get_list_tail:70] echo +epprd_rg:process_resources[process_applications:357] read MISCDATA +epprd_rg:process_resources[process_applications:359] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[process_applications:374] APPLICATIONS=epprd_app +epprd_rg:process_resources[process_applications:374] export APPLICATIONS +epprd_rg:process_resources[process_applications:375] MISC_DATA='' +epprd_rg:process_resources[process_applications:375] export MISC_DATA 
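[Editor's note] With the ROHA session a no-op again, process_applications fans out the actual work: each group's application start runs as a background start_or_stop_applications_for_rg, its PID joins WAITPIDS, and the parent waits on the lot. Schematically, in ksh:

    WAITPIDS=''
    for GROUPNAME in $RESOURCE_GROUPS ; do
        # each child runs clcallev start_server and records its exit status in a per-group file
        start_or_stop_applications_for_rg ACQUIRE "$TMP_FILE.$GROUPNAME" &
        WAITPIDS="$WAITPIDS $!"
    done
    wait $WAITPIDS

Here there is a single group, so WAITPIDS holds just PID 21430780, and the wait covers the start_server event that follows.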
+epprd_rg:process_resources[process_applications:378] : Now call start_or_stop_applications_for_rg to do the app start/stop. +epprd_rg:process_resources[process_applications:381] start_or_stop_applications_for_rg ACQUIRE /var/hacmp/log/.process_resources_applications.20381972.epprd_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:248] PS4_FUNC=start_or_stop_applications_for_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:248] typeset PS4_FUNC +epprd_rg:process_resources[start_or_stop_applications_for_rg:249] [[ high == high ]] +epprd_rg:process_resources[start_or_stop_applications_for_rg:249] set -x +epprd_rg:process_resources[start_or_stop_applications_for_rg:251] [[ ACQUIRE == ACQUIRE ]] +epprd_rg:process_resources[start_or_stop_applications_for_rg:253] cmd_to_execute=start_server +epprd_rg:process_resources[start_or_stop_applications_for_rg:259] : File name to store our exit status +epprd_rg:process_resources[start_or_stop_applications_for_rg:261] STATUS_FILE=/var/hacmp/log/.process_resources_applications.20381972.epprd_rg +epprd_rg:process_resources[start_or_stop_applications_for_rg:264] : Use clcallev to run the event +epprd_rg:process_resources[start_or_stop_applications_for_rg:266] clcallev start_server epprd_app +epprd_rg:process_resources[process_applications:384] : Add PID of the last bg start_or_stop_applications_for_rg process to WAITPIDS. +epprd_rg:process_resources[process_applications:386] WAITPIDS=' 21430780' +epprd_rg:process_resources[process_applications:390] : Wait for the start_or_stop_applications_for_rg PIDs to finish. +epprd_rg:process_resources[process_applications:393] wait 21430780 Sep 30 2023 03:25:54 EVENT START: start_server epprd_app |2023-09-30T03:25:54|18770|EVENT START: start_server epprd_app| +epprd_rg:start_server[+206] version=%I% +epprd_rg:start_server[+210] export TMP_FILE=/var/hacmp/log/.start_server.19923402 +epprd_rg:start_server[+211] export DCD=/etc/es/objrepos +epprd_rg:start_server[+212] export ACD=/usr/es/sbin/cluster/etc/objrepos/active +epprd_rg:start_server[+214] rm -f /var/hacmp/log/.start_server.19923402 +epprd_rg:start_server[+216] STATUS=0 +epprd_rg:start_server[+220] PROC_RES=false +epprd_rg:start_server[+224] [[ APPLICATIONS != 0 ]] +epprd_rg:start_server[+224] [[ APPLICATIONS != GROUP ]] +epprd_rg:start_server[+225] PROC_RES=true +epprd_rg:start_server[+228] set -u +epprd_rg:start_server[+229] typeset WPARNAME EXEC WPARDIR +epprd_rg:start_server[+230] export WPARNAME EXEC WPARDIR +epprd_rg:start_server[+232] EXEC= +epprd_rg:start_server[+233] WPARNAME= +epprd_rg:start_server[+234] WPARDIR= +epprd_rg:start_server[+237] ALLSERVERS=All_servers +epprd_rg:start_server[+238] ALLNOERRSERV=All_nonerror_servers +epprd_rg:start_server[+239] cl_RMupdate resource_acquiring All_servers start_server 2023-09-30T03:25:54.806353 2023-09-30T03:25:54.810611 +epprd_rg:start_server[+241] +epprd_rg:start_server[+241] clwparname epprd_rg +epprd_rg:clwparname[38] version=1.3.1.1 +epprd_rg:clwparname[44] clodmget '-qname = WPAR_NAME' -f group -n HACMPresource +epprd_rg:clwparname[44] [[ -z '' ]] +epprd_rg:clwparname[44] exit 0 WPARNAME= +epprd_rg:start_server[+243] (( 0 == 0 )) +epprd_rg:start_server[+243] [[ -n ]] +epprd_rg:start_server[+258] start_and_monitor_server epprd_app +epprd_rg:start_server[start_and_monitor_server+5] RETURN_STATUS=0 +epprd_rg:start_server[start_and_monitor_server+7] server=epprd_app +epprd_rg:start_server[start_and_monitor_server+12] echo Checking whether epprd_app is already running...\n Checking 
whether epprd_app is already running... +epprd_rg:start_server[start_and_monitor_server+12] [[ -n ]] +epprd_rg:start_server[start_and_monitor_server+18] cl_app_startup_monitor -s epprd_app -a +epprd_rg:start_server[+261] wait +epprd_rg:start_server[start_and_monitor_server+21] RETURN_STATUS=1 +epprd_rg:start_server[start_and_monitor_server+22] : exit status of cl_app_startup_monitor is: 1 +epprd_rg:start_server[start_and_monitor_server+22] [[ 1 == 0 ]] +epprd_rg:start_server[start_and_monitor_server+33] echo Application monitor(s) indicate that epprd_app is not active. Continuing with application startup.\n Application monitor(s) indicate that epprd_app is not active. Continuing with application startup. +epprd_rg:start_server[start_and_monitor_server+42] +epprd_rg:start_server[start_and_monitor_server+42] cllsserv -cn epprd_app +epprd_rg:start_server[start_and_monitor_server+42] cut -d: -f2 START=/etc/hacmp/epprd_start.sh +epprd_rg:start_server[start_and_monitor_server+43] +epprd_rg:start_server[start_and_monitor_server+43] echo /etc/hacmp/epprd_start.sh +epprd_rg:start_server[start_and_monitor_server+43] cut -d -f1 START_SCRIPT=/etc/hacmp/epprd_start.sh +epprd_rg:start_server[start_and_monitor_server+44] +epprd_rg:start_server[start_and_monitor_server+44] cllsserv -cn epprd_app +epprd_rg:start_server[start_and_monitor_server+44] cut -d: -f4 START_MODE=background +epprd_rg:start_server[start_and_monitor_server+44] [[ -z background ]] +epprd_rg:start_server[start_and_monitor_server+47] PATTERN=epprda epprd_app +epprd_rg:start_server[start_and_monitor_server+48] RETURN_STATUS=0 +epprd_rg:start_server[start_and_monitor_server+51] amlog_trace Starting application controller in background|epprd_app +epprd_rg:start_server[start_and_monitor_server+200] clcycle clavailability.log +epprd_rg:start_server[start_and_monitor_server+200] 1> /dev/null 2>& 1 +epprd_rg:start_server[start_and_monitor_server+200] +epprd_rg:start_server[start_and_monitor_server+200] cltime DATE=2023-09-30T03:25:54.864233 +epprd_rg:start_server[start_and_monitor_server+200] echo |2023-09-30T03:25:54.864233|INFO: Starting application controller in background|epprd_app +epprd_rg:start_server[start_and_monitor_server+200] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:start_server[start_and_monitor_server+51] [[ -n ]] +epprd_rg:start_server[start_and_monitor_server+51] [[ -z ]] +epprd_rg:start_server[start_and_monitor_server+51] [[ -x /etc/hacmp/epprd_start.sh ]] +epprd_rg:start_server[start_and_monitor_server+60] [ background == background ] +epprd_rg:start_server[start_and_monitor_server+62] date +epprd_rg:start_server[start_and_monitor_server+62] LC_ALL=C +epprd_rg:start_server[start_and_monitor_server+62] echo Running application controller start script for epprd_app in the background at Sat Sep 30 03:25:54 KORST 2023.\n Running application controller start script for epprd_app in the background at Sat Sep 30 03:25:54 KORST 2023. 
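----------------------------------------------------------------------------
Annotation: shape of the start_and_monitor_server flow above and below
The controller start follows a fixed sequence: ask the configured startup
monitor whether epprd_app is already active, look up the start script and
start mode with cllsserv, run the script (in the background here), then
re-run the startup monitor to confirm the application came up. A condensed
sketch of that control flow (PowerHA utilities as traced; the real function
also records "<server> <rc>" in a status file that the parent start_server
reads back, which is omitted here):

    server=epprd_app
    if ! cl_app_startup_monitor -s $server -a ; then           # rc 1: not active yet
        START_SCRIPT=$(cllsserv -cn $server | cut -d: -f2)     # field 2: start script
        START_MODE=$(cllsserv -cn $server | cut -d: -f4)       # field 4: start mode
        if [[ $START_MODE == background ]] ; then
            $START_SCRIPT &                                    # launch, verify below
        else
            $START_SCRIPT
        fi
        cl_app_startup_monitor -s $server                      # rc 0 once startup seen
    fi
----------------------------------------------------------------------------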
+epprd_rg:start_server[start_and_monitor_server+63] /etc/hacmp/epprd_start.sh +epprd_rg:start_server[start_and_monitor_server+63] ODMDIR=/etc/es/objrepos +epprd_rg:start_server[start_and_monitor_server+62] [[ 0 != 0 ]] +epprd_rg:start_server[start_and_monitor_server+62] [[ -n ]] +epprd_rg:start_server[start_and_monitor_server+94] cl_app_startup_monitor -s epprd_app +epprd_rg:start_server[start_and_monitor_server+97] RETURN_STATUS=0 +epprd_rg:start_server[start_and_monitor_server+98] : exit status of cl_app_startup_monitor is: 0 +epprd_rg:start_server[start_and_monitor_server+98] [[ 0 != 0 ]] +epprd_rg:start_server[start_and_monitor_server+109] echo epprd_app 0 +epprd_rg:start_server[start_and_monitor_server+109] ##### # # ##### ## ##### ##### # # # #### # # # # # # # # ## # # # ##### # # # # # # # # # # # # # ###### ##### # # # # # # ### # # # # # # # # # # ## # # ##### # # # # # # # # # #### 1> /var/hacmp/log/.start_server.19923402.epprd_app +epprd_rg:start_server[start_and_monitor_server+112] ##### # ###### ####### ###### ###### # # # # # # # # # # # # # # # # # # # # # ##### # # ###### ##### ###### ###### # ####### # # # # # # # # # # # # ##### # # # ####### # # +epprd_rg:start_server[start_and_monitor_server+112] cllsserv -cn epprd_app +epprd_rg:start_server[start_and_monitor_server+112] cut -d: -f4 START_MODE=background +epprd_rg:start_server[start_and_monitor_server+112] [[ background == foreground ]] +epprd_rg:start_server[start_and_monitor_server+132] return 0 +epprd_rg:start_server[+266] +epprd_rg:start_server[+266] cllsserv -cn epprd_app +epprd_rg:/usr/es/sbin/cluster/utilities/cllsgrp[51] [[ high == high ]] +epprd_rg:/usr/es/sbin/cluster/utilities/cllsgrp[51] version=1.11 +epprd_rg:/usr/es/sbin/cluster/utilities/cllsgrp[52] [ 0 -gt 1 ] +epprd_rg:/usr/es/sbin/cluster/utilities/cllsgrp[59] [[ '' == -p ]] +epprd_rg:/usr/es/sbin/cluster/utilities/cllsgrp[65] [[ '' == -n ]] +epprd_rg:/usr/es/sbin/cluster/utilities/cllsgrp[78] clgetgrp -f group +epprd_rg:/usr/es/sbin/cluster/utilities/cllsgrp[78] 2> /dev/null +epprd_rg:start_server[+266] cut -d: -f4 +epprd_rg:/usr/es/sbin/cluster/utilities/cllsgrp[78] sort START_MODE=background +epprd_rg:start_server[+267] [ background == background ] +epprd_rg:start_server[+269] +epprd_rg:start_server[+269] cat /var/hacmp/log/.start_server.19923402.epprd_app +epprd_rg:start_server[+269] cut -f2 -d SUCCESS=0 +epprd_rg:start_server[+269] [[ 0 != 0 ]] +epprd_rg:start_server[+274] amlog_trace Starting application controller in background|epprd_app +epprd_rg:start_server[+200] clcycle clavailability.log +epprd_rg:start_server[+200] 1> /dev/null 2>& 1 +epprd_rg:start_server[+200] +epprd_rg:start_server[+200] cltime DATE=2023-09-30T03:25:54.929365 +epprd_rg:start_server[+200] echo |2023-09-30T03:25:54.929365|INFO: Starting application controller in background|epprd_app +epprd_rg:start_server[+200] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:start_server[+276] +epprd_rg:start_server[+276] clodmget -q name = epprd_app -n -f cpu_usage_monitor HACMPserver MACTIVE=no +epprd_rg:start_server[+276] [[ no == yes ]] +epprd_rg:start_server[+292] +epprd_rg:start_server[+292] cat /var/hacmp/log/.start_server.19923402.epprd_app +epprd_rg:start_server[+292] cut -f2 -d cllsres: Resource Group not configured or not found. 
SUCCESS=0 +epprd_rg:start_server[+292] [[ 0 != +([0-9]) ]] +epprd_rg:start_server[+297] (( 0 != 0 )) +epprd_rg:start_server[+303] [[ 0 == 0 ]] +epprd_rg:start_server[+306] rm -f /var/hacmp/log/.start_server.19923402.epprd_app +epprd_rg:start_server[+308] cl_RMupdate resource_up All_nonerror_servers start_server 2023-09-30T03:25:54.971565 2023-09-30T03:25:54.975996 +epprd_rg:start_server[+314] exit 0 Sep 30 2023 03:25:54 EVENT COMPLETED: start_server epprd_app 0 |2023-09-30T03:25:55|18770|EVENT COMPLETED: start_server epprd_app 0| +epprd_rg:process_resources[start_or_stop_applications_for_rg:267] RC=0 +epprd_rg:process_resources[start_or_stop_applications_for_rg:269] (( 0 != 0 && 0 != 11 )) +epprd_rg:process_resources[start_or_stop_applications_for_rg:279] (( 0 != 0 )) +epprd_rg:process_resources[start_or_stop_applications_for_rg:291] : Store the result for later accumulation +epprd_rg:process_resources[start_or_stop_applications_for_rg:293] print 'epprd_rg 0' +epprd_rg:process_resources[start_or_stop_applications_for_rg:293] 1>> /var/hacmp/log/.process_resources_applications.20381972.epprd_rg +epprd_rg:process_resources[process_applications:396] : Look at all the status files to see if any were unsuccessful +epprd_rg:process_resources[process_applications:399] cat /var/hacmp/log/.process_resources_applications.20381972.epprd_rg +epprd_rg:process_resources[process_applications:399] read skip SUCCESS rest +epprd_rg:process_resources[process_applications:401] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:411] rm -f /var/hacmp/log/.process_resources_applications.20381972.epprd_rg +epprd_rg:process_resources[process_applications:416] : Release lpar resources in one-shot now that applications are stopped +epprd_rg:process_resources[process_applications:418] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[process_applications:433] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:434] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:435] [[ 0 != 0 ]] +epprd_rg:process_resources[process_applications:439] return 0 +epprd_rg:process_resources[3550] RC=0 +epprd_rg:process_resources[3551] [[ ACQUIRE == RELEASE ]] +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-09-30T03:25:55.087958 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=ONLINE RESOURCE_GROUPS='"epprd_rg"' +epprd_rg:process_resources[1] JOB_TYPE=ONLINE +epprd_rg:process_resources[1] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ ONLINE == RELEASE ]] +epprd_rg:process_resources[3360] [[ ONLINE == ONLINE ]] +epprd_rg:process_resources[3363] INFO_STRING='' +epprd_rg:process_resources[3364] clnodename +epprd_rg:process_resources[3373] ENV_VAR=GROUP_epprd_rg_epprda 
+epprd_rg:process_resources[3374] eval 'echo $GROUP_epprd_rg_epprda' +epprd_rg:process_resources[1] echo WILLBEUPPOSTEVENT +epprd_rg:process_resources[3374] read ENV_VAR +epprd_rg:process_resources[3375] [[ WILLBEUPPOSTEVENT == WILLBEUPPOSTEVENT ]] +epprd_rg:process_resources[3376] INFO_STRING='|DESTINATION=epprda' +epprd_rg:process_resources[3377] IS_SERVICE_STOP=0 +epprd_rg:process_resources[3379] [[ WILLBEUPPOSTEVENT == ISUPPREEVENT ]] +epprd_rg:process_resources[3373] ENV_VAR=GROUP_epprd_rg_epprds +epprd_rg:process_resources[3374] eval 'echo $GROUP_epprd_rg_epprds' +epprd_rg:process_resources[1] echo +epprd_rg:process_resources[3374] read ENV_VAR +epprd_rg:process_resources[3375] [[ '' == WILLBEUPPOSTEVENT ]] +epprd_rg:process_resources[3379] [[ '' == ISUPPREEVENT ]] +epprd_rg:process_resources[3384] (( 1 == 0 && 0 ==0 )) +epprd_rg:process_resources[3673] set_resource_group_state UP +epprd_rg:process_resources[set_resource_group_state:82] PS4_FUNC=set_resource_group_state +epprd_rg:process_resources[set_resource_group_state:82] typeset PS4_FUNC +epprd_rg:process_resources[set_resource_group_state:83] [[ high == high ]] +epprd_rg:process_resources[set_resource_group_state:83] set -x +epprd_rg:process_resources[set_resource_group_state:84] STAT=0 +epprd_rg:process_resources[set_resource_group_state:85] new_status=UP +epprd_rg:process_resources[set_resource_group_state:89] export GROUPNAME +epprd_rg:process_resources[set_resource_group_state:90] [[ UP != DOWN ]] +epprd_rg:process_resources[set_resource_group_state:92] clchdaemons -d clstrmgr_scripts -t resource_locator -n epprda -o epprd_rg -v UP +epprd_rg:process_resources[set_resource_group_state:100] : Resource Manager Updates +epprd_rg:process_resources[set_resource_group_state:116] cl_RMupdate rg_up epprd_rg process_resources 2023-09-30T03:25:55.131919 2023-09-30T03:25:55.136394 +epprd_rg:process_resources[set_resource_group_state:118] amlog_trace '' 'acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:318] clcycle clavailability.log +epprd_rg:process_resources[amlog_trace:318] 1> /dev/null 2>& 1 +epprd_rg:process_resources[amlog_trace:319] cltime +epprd_rg:process_resources[amlog_trace:319] DATE=2023-09-30T03:25:55.167798 +epprd_rg:process_resources[amlog_trace:320] echo '|2023-09-30T03:25:55.167798|INFO: acquire|epprd_rg|epprda' +epprd_rg:process_resources[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log +epprd_rg:process_resources[set_resource_group_state:153] return 0 +epprd_rg:process_resources[3324] true +epprd_rg:process_resources[3326] : call rgpa, and it will tell us what to do next +epprd_rg:process_resources[3328] set -a +epprd_rg:process_resources[3329] clRGPA +epprd_rg:clRGPA[+47] [[ high = high ]] +epprd_rg:clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ +epprd_rg:clRGPA[+49] usingVer=clrgpa +epprd_rg:clRGPA[+54] clrgpa 2023-09-30T03:25:55.180594 clrgpa +epprd_rg:clRGPA[+55] exit 0 +epprd_rg:process_resources[3329] eval JOB_TYPE=NONE +epprd_rg:process_resources[1] JOB_TYPE=NONE +epprd_rg:process_resources[3330] RC=0 +epprd_rg:process_resources[3331] set +a +epprd_rg:process_resources[3333] (( 0 != 0 )) +epprd_rg:process_resources[3342] RESOURCE_GROUPS=epprd_rg +epprd_rg:process_resources[3343] GROUPNAME=epprd_rg +epprd_rg:process_resources[3343] export GROUPNAME +epprd_rg:process_resources[3353] IS_SERVICE_START=1 +epprd_rg:process_resources[3354] IS_SERVICE_STOP=1 +epprd_rg:process_resources[3360] [[ NONE == RELEASE ]] 
+epprd_rg:process_resources[3360] [[ NONE == ONLINE ]] +epprd_rg:process_resources[3729] break +epprd_rg:process_resources[3740] : If sddsrv was turned off above, turn it back on again +epprd_rg:process_resources[3742] [[ FALSE == TRUE ]] +epprd_rg:process_resources[3747] exit 0 +epprd_rg:rg_move_complete[455] STATUS=0 +epprd_rg:rg_move_complete[456] : The exit status of process_resources is: 0 +epprd_rg:rg_move_complete[461] unset AM_SYNC_CALLED_BY +epprd_rg:rg_move_complete[462] [[ TRUE == TRUE ]] +epprd_rg:rg_move_complete[491] [[ -z '' ]] +epprd_rg:rg_move_complete[493] RESOURCE_GROUPS=epprd_rg +epprd_rg:rg_move_complete[499] GROUPNAME=epprd_rg +epprd_rg:rg_move_complete[499] export GROUPNAME +epprd_rg:rg_move_complete[501] cl_rrmethods2call postrg_move +epprd_rg:cl_rrmethods2call[56] version=%I% +epprd_rg:cl_rrmethods2call[84] RRMETHODS='' +epprd_rg:cl_rrmethods2call[85] NEED_RR_ENV_VARS=no +epprd_rg:cl_rrmethods2call[124] NEED_RR_ENV_VARS=yes +epprd_rg:cl_rrmethods2call[129] : Set the '*_REP_RESOURCE' variables if needed. +epprd_rg:cl_rrmethods2call[131] [[ yes == yes ]] +epprd_rg:cl_rrmethods2call[133] cllsres +epprd_rg:cl_rrmethods2call[133] 2> /dev/null +epprd_rg:cl_rrmethods2call[133] eval APPLICATIONS='"epprd_app"' EXPORT_FILESYSTEM='"/board_org' '/sapmnt/EPP"' FILESYSTEM='""' FORCED_VARYON='"false"' FSCHECK_TOOL='"fsck"' FS_BEFORE_IPADDR='"false"' MOUNT_FILESYSTEM='"/board;/board_org"' RECOVERY_METHOD='"sequential"' SERVICE_LABEL='"epprd"' SSA_DISK_FENCING='"false"' VG_AUTO_IMPORT='"false"' VOLUME_GROUP='"datavg"' USERDEFINED_RESOURCES='""' +epprd_rg:cl_rrmethods2call[1] APPLICATIONS=epprd_app +epprd_rg:cl_rrmethods2call[1] EXPORT_FILESYSTEM='/board_org /sapmnt/EPP' +epprd_rg:cl_rrmethods2call[1] FILESYSTEM='' +epprd_rg:cl_rrmethods2call[1] FORCED_VARYON=false +epprd_rg:cl_rrmethods2call[1] FSCHECK_TOOL=fsck +epprd_rg:cl_rrmethods2call[1] FS_BEFORE_IPADDR=false +epprd_rg:cl_rrmethods2call[1] MOUNT_FILESYSTEM='/board;/board_org' +epprd_rg:cl_rrmethods2call[1] RECOVERY_METHOD=sequential +epprd_rg:cl_rrmethods2call[1] SERVICE_LABEL=epprd +epprd_rg:cl_rrmethods2call[1] SSA_DISK_FENCING=false +epprd_rg:cl_rrmethods2call[1] VG_AUTO_IMPORT=false +epprd_rg:cl_rrmethods2call[1] VOLUME_GROUP=datavg +epprd_rg:cl_rrmethods2call[1] USERDEFINED_RESOURCES='' +epprd_rg:cl_rrmethods2call[137] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[142] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[147] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[152] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[157] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[162] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[167] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[172] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[182] [[ -z '' ]] +epprd_rg:cl_rrmethods2call[184] typeset sysmgdata +epprd_rg:cl_rrmethods2call[185] typeset reposmgdata +epprd_rg:cl_rrmethods2call[186] [[ -x /usr/es/sbin/cluster/xd_generic/xd_cli/clxd_list_mg_smit ]] +epprd_rg:cl_rrmethods2call[191] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[191] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[197] echo '' +epprd_rg:cl_rrmethods2call[199] return 0 +epprd_rg:rg_move_complete[501] METHODS='' +epprd_rg:rg_move_complete[516] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. 
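----------------------------------------------------------------------------
Annotation: the cllsres eval pattern in the cl_rrmethods2call trace above
cllsres prints the group's resources as NAME="value" assignments, and the
caller evaluates that output straight into shell variables; every name shown
above (APPLICATIONS, VOLUME_GROUP, EXPORT_FILESYSTEM, ...) arrives that way.
Reduced to its core, with the traced values:

    eval $(cllsres 2>/dev/null)     # e.g. APPLICATIONS="epprd_app" VOLUME_GROUP="datavg"
    print -- $APPLICATIONS          # -> epprd_app
    print -- $EXPORT_FILESYSTEM     # -> /board_org /sapmnt/EPP
----------------------------------------------------------------------------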
+epprd_rg:rg_move_complete[518] exit 0 Sep 30 2023 03:25:55 EVENT COMPLETED: rg_move_complete epprda 1 0 |2023-09-30T03:25:55|18770|EVENT COMPLETED: rg_move_complete epprda 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-30T03:25:55.299919 + echo '|2023-09-30T03:25:55.299919|INFO: rg_move_complete|epprd_rg|epprda|1|0' + 1>> /var/hacmp/availability/clavailability.log PowerHA SystemMirror Event Summary ---------------------------------------------------------------------------- Serial number for this event: 18770 Event: TE_RG_MOVE_ACQUIRE Start time: Sat Sep 30 03:25:14 2023 End time: Sat Sep 30 03:25:55 2023 Action: Resource: Script Name: ---------------------------------------------------------------------------- Acquiring resource group: epprd_rg process_resources Search on: Sat.Sep.30.03:25:14.KORST.2023.process_resources.epprd_rg.ref Acquiring resource: All_service_addrs acquire_service_addr Search on: Sat.Sep.30.03:25:15.KORST.2023.acquire_service_addr.All_service_addrs.epprd_rg.ref Resource online: All_nonerror_service_addrs acquire_service_addr Search on: Sat.Sep.30.03:25:15.KORST.2023.acquire_service_addr.All_nonerror_service_addrs.epprd_rg.ref Acquiring resource: All_volume_groups cl_activate_vgs Search on: Sat.Sep.30.03:25:15.KORST.2023.cl_activate_vgs.All_volume_groups.epprd_rg.ref Resource online: All_nonerror_volume_groups cl_activate_vgs Search on: Sat.Sep.30.03:25:19.KORST.2023.cl_activate_vgs.All_nonerror_volume_groups.epprd_rg.ref Acquiring resource: All_filesystems cl_activate_fs Search on: Sat.Sep.30.03:25:21.KORST.2023.cl_activate_fs.All_filesystems.epprd_rg.ref Resource online: All_non_error_filesystems cl_activate_fs Search on: Sat.Sep.30.03:25:23.KORST.2023.cl_activate_fs.All_non_error_filesystems.epprd_rg.ref Acquiring resource: All_exports cl_export_fs Search on: Sat.Sep.30.03:25:32.KORST.2023.cl_export_fs.All_exports.epprd_rg.ref Resource online: All_nonerror_exports cl_export_fs Search on: Sat.Sep.30.03:25:32.KORST.2023.cl_export_fs.All_nonerror_exports.epprd_rg.ref Acquiring resource: All_nfs_mounts cl_activate_nfs Search on: Sat.Sep.30.03:25:33.KORST.2023.cl_activate_nfs.All_nfs_mounts.epprd_rg.ref Acquiring resource: All_servers start_server Search on: Sat.Sep.30.03:25:54.KORST.2023.start_server.All_servers.epprd_rg.ref Resource online: All_nonerror_servers start_server Search on: Sat.Sep.30.03:25:54.KORST.2023.start_server.All_nonerror_servers.epprd_rg.ref Resource group online: epprd_rg process_resources Search on: Sat.Sep.30.03:25:55.KORST.2023.process_resources.epprd_rg.ref ---------------------------------------------------------------------------- |EVENT_SUMMARY_START|TE_RG_MOVE_ACQUIRE|2023-09-30T03:25:14|2023-09-30T03:25:55|18770| |EV_SUM_SEARCHON_STR|Sat.Sep.30.03:25:14.KORST.2023.process_resources.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Sep.30.03:25:15.KORST.2023.acquire_service_addr.All_service_addrs.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Sep.30.03:25:15.KORST.2023.acquire_service_addr.All_nonerror_service_addrs.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Sep.30.03:25:15.KORST.2023.cl_activate_vgs.All_volume_groups.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Sep.30.03:25:19.KORST.2023.cl_activate_vgs.All_nonerror_volume_groups.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Sep.30.03:25:21.KORST.2023.cl_activate_fs.All_filesystems.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Sep.30.03:25:23.KORST.2023.cl_activate_fs.All_non_error_filesystems.epprd_rg.ref.ref| 
|EV_SUM_SEARCHON_STR|Sat.Sep.30.03:25:32.KORST.2023.cl_export_fs.All_exports.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Sep.30.03:25:32.KORST.2023.cl_export_fs.All_nonerror_exports.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Sep.30.03:25:33.KORST.2023.cl_activate_nfs.All_nfs_mounts.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Sep.30.03:25:54.KORST.2023.start_server.All_servers.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Sep.30.03:25:54.KORST.2023.start_server.All_nonerror_servers.epprd_rg.ref.ref| |EV_SUM_SEARCHON_STR|Sat.Sep.30.03:25:55.KORST.2023.process_resources.epprd_rg.ref.ref| |EVENT_SUMMARY_END| LSNRCTL for IBM/AIX RISC System/6000: Version 11.2.0.3.0 - Production on 30-SEP-2023 03:25:55 Copyright (c) 1991, 2011, Oracle. All rights reserved. Starting /oracle/EPP/112_64/bin/tnslsnr: please wait... TNSLSNR for IBM/AIX RISC System/6000: Version 11.2.0.3.0 - Production System parameter file is /oracle/EPP/112_64/network/admin/listener.ora Log messages written to /oracle/EPP/saptrace/diag/tnslsnr/epprda/listener/alert/log.xml Listening on: (DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=EPP.WORLD))) Listening on: (DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=EPP))) Listening on: (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=61.81.244.156)(PORT=1521))) Connecting to (ADDRESS=(PROTOCOL=IPC)(KEY=EPP.WORLD)) STATUS of the LISTENER ------------------------ Alias LISTENER Version TNSLSNR for IBM/AIX RISC System/6000: Version 11.2.0.3.0 - Production Start Date 30-SEP-2023 03:25:55 Uptime 0 days 0 hr. 0 min. 0 sec Trace Level off Security ON: Local OS Authentication SNMP ON Listener Parameter File /oracle/EPP/112_64/network/admin/listener.ora Listener Log File /oracle/EPP/saptrace/diag/tnslsnr/epprda/listener/alert/log.xml Listening Endpoints Summary... (DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=EPP.WORLD))) (DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=EPP))) (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=61.81.244.156)(PORT=1521))) Services Summary... Service "EPP" has 1 instance(s). Instance "EPP", status UNKNOWN, has 1 handler(s) for this service... 
The command completed successfully PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 18771 No resource state change initiated by the cluster manager as a result of this event ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_JOIN_NODE_DEP_COMPLETE|2023-09-30T03:25:57|18771| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Sep 30 2023 03:25:57 EVENT START: node_up_complete epprda |2023-09-30T03:25:57|18771|EVENT START: node_up_complete epprda| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-30T03:25:57.501318 + echo '|2023-09-30T03:25:57.501318|INFO: node_up_complete|epprda' + 1>> /var/hacmp/availability/clavailability.log + version=%I% + set -a + cllsparam -n epprda + eval NODE_NAME=epprda VERBOSE_LOGGING=high PS4=$'\'${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO]' $'\'' DEBUG_LEVEL=Standard LC_ALL=$'\'C\'' + NODE_NAME=epprda + VERBOSE_LOGGING=high :node_up_complete[1] PS4='${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] ' :node_up_complete[1] DEBUG_LEVEL=Standard :node_up_complete[1] LC_ALL=C :node_up_complete[80] set +a :node_up_complete[82] NODENAME=epprda :node_up_complete[83] RC=0 :node_up_complete[83] typeset -i RC :node_up_complete[84] UPDATESTATD=0 :node_up_complete[84] typeset -i UPDATESTATD :node_up_complete[86] LPM_IN_PROGRESS_DIR=/var/hacmp/.lpm_in_progress :node_up_complete[86] typeset LPM_IN_PROGRESS_DIR :node_up_complete[87] LPM_IN_PROGRESS_PREFIX=lpm :node_up_complete[87] typeset LPM_IN_PROGRESS_PREFIX :node_up_complete[88] STATE_FILE=/var/hacmp/cl_dr.state :node_up_complete[88] typeset STATE_FILE :node_up_complete[97] STATUS=0 :node_up_complete[99] set -u :node_up_complete[101] (( 1 < 1 )) :node_up_complete[107] START_MODE='' :node_up_complete[107] typeset START_MODE :node_up_complete[108] (( 1 > 1 )) :node_up_complete[114] : serial number for this event is 18771 :node_up_complete[118] RPCLOCKDSTOPPED=0 :node_up_complete[118] typeset -i RPCLOCKDSTOPPED :node_up_complete[119] [[ -f /tmp/.RPCLOCKDSTOPPED ]] :node_up_complete[127] wc -l :node_up_complete[127] clnodename :node_up_complete[127] (( 2 == 2 )) :node_up_complete[129] clodmget -f group -n HACMPgroup :node_up_complete[129] RESOURCE_GROUPS=epprd_rg :node_up_complete[132] clodmget -q group='epprd_rg AND name=EXPORT_FILESYSTEM' -f value -n HACMPresource :node_up_complete[132] EXPORTLIST=$'/board_org\n/sapmnt/EPP' :node_up_complete[133] [[ -n $'/board_org\n/sapmnt/EPP' ]] :node_up_complete[135] UPDATESTATD=1 :node_up_complete[136] [[ epprda == epprda ]] :node_up_complete[139] lssrc -s rpc.statd :node_up_complete[139] LC_ALL=C :node_up_complete[139] grep inoperative :node_up_complete[140] (( 1 == 0 )) :node_up_complete[146] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C 
:cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+196] rpcinfo -p :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw status :cl_update_statd(0)[+207] : Get the current twin, if there is one :cl_update_statd(0)[+209] :cl_update_statd(0)[+209] nfso -H sm_gethost :cl_update_statd(0)[+209] 2>& 1 CURTWIN= :cl_update_statd(0)[+210] RC=0 :cl_update_statd(0)[+212] [[ -z true ]] :cl_update_statd(0)[+212] [[ -z ]] :cl_update_statd(0)[+215] : Local node is no longer a cluster member, unregister its twin :cl_update_statd(0)[+215] [[ -n ]] :cl_update_statd(0)[+259] : RC is actually 0 :cl_update_statd(0)[+266] return 0 :node_up_complete[147] (( 0 )) :node_up_complete[151] break :node_up_complete[156] (( 1 )) :node_up_complete[158] (( 0 )) :node_up_complete[198] [[ TRUE == FALSE ]] :node_up_complete[268] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. :node_up_complete[270] : This is the final clRGinfo output :node_up_complete[272] clRGinfo -p -t :node_up_complete[272] 2>& 1 clRGinfo[431]: version I clRGinfo[517]: Number of resource groups = 0 clRGinfo[562]: cluster epprda_cluster is version = 22 clRGinfo[597]: no resource groups specified on command line - print all clRGinfo[685]: Current group is 'epprd_rg' get primary state info for state 6 get secondary state info for state 6 getPrimaryStateStr: using primary_table => primary_state_table get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_table Cluster Name: epprda_cluster Resource Group Name: epprd_rg Node Group State Delayed Timers ---------------------------------------------------------------- --------------- ------------------- epprda ONLINE epprds OFFLINE :node_up_complete[277] (( 0 == 0 )) :node_up_complete[279] [[ epprda != epprda ]] :node_up_complete[300] exit 0 Sep 30 2023 03:25:57 EVENT COMPLETED: node_up_complete epprda 0 |2023-09-30T03:25:57|18771|EVENT COMPLETED: node_up_complete epprda 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-30T03:25:57.728064 + echo '|2023-09-30T03:25:57.728064|INFO: node_up_complete|epprda|0' + 1>> /var/hacmp/availability/clavailability.log Checking EPP Database ------------------------------------------- J2EE Database is not available via test See logfile /home/eppadm/JdbcCon.log Running /usr/sap/EPP/SYS/exe/run/startj2eedb PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 22392 Cluster services started on node 'epprds' Node Up Completion Event has been enqueued. ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_JOIN_NODE_DEP|2023-09-30T03:26:01|22392| |NODE_UP_COMPLETE| |EVENT_PREAMBLE_END| Trying to start EPP database ... 
Log file: /home/eppadm/startdb.log Sep 30 2023 03:26:04 EVENT START: node_up epprds |2023-09-30T03:26:04|22392|EVENT START: node_up epprds| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-30T03:26:04.182774 + echo '|2023-09-30T03:26:04.182774|INFO: node_up|epprds' + 1>> /var/hacmp/availability/clavailability.log :node_up[182] version=%I% :node_up[185] NODENAME=epprds :node_up[185] export NODENAME :node_up[193] STATUS=0 :node_up[193] typeset -li STATUS :node_up[194] RC=0 :node_up[194] typeset -li RC :node_up[195] ENABLE_NFS_CROSS_MOUNT=false :node_up[196] START_MODE='' :node_up[196] typeset START_MODE :node_up[198] set -u :node_up[200] (( 1 < 1 )) :node_up[200] (( 1 > 2 )) :node_up[207] : serial number for this event is 22392 :node_up[210] [[ epprda == epprds ]] :node_up[219] (( 1 > 1 )) :node_up[256] : If RG_DEPENDENCIES=false, process RGs with clsetenvgrp :node_up[258] [[ TRUE == FALSE ]] :node_up[281] : localnode processing prior to RG acquisition :node_up[283] [[ epprda == epprds ]] :node_up[498] : Enable NFS crossmounts during manual start :node_up[500] [[ -n false ]] :node_up[500] [[ false == true ]] :node_up[607] : When RG dependencies are not configured we call node_up_local/remote, :node_up[608] : followed by process_resources to process any remaining groups :node_up[610] [[ TRUE == FALSE ]] :node_up[657] [[ epprda == epprds ]] :node_up[667] return 0 Sep 30 2023 03:26:04 EVENT COMPLETED: node_up epprds 0 |2023-09-30T03:26:04|22392|EVENT COMPLETED: node_up epprds 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-30T03:26:04.304987 + echo '|2023-09-30T03:26:04.304987|INFO: node_up|epprds|0' + 1>> /var/hacmp/availability/clavailability.log Sep 30 2023 03:26:07 EVENT START: rg_move_fence epprds 1 |2023-09-30T03:26:07|22393|EVENT START: rg_move_fence epprds 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-30T03:26:07.855278 + echo '|2023-09-30T03:26:07.855278|INFO: rg_move_fence|epprd_rg|epprds|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_fence[62] [[ high == high ]] :rg_move_fence[62] version=1.11 :rg_move_fence[63] NODENAME=epprds :rg_move_fence[63] export NODENAME :rg_move_fence[65] set -u :rg_move_fence[67] [ 2 != 2 ] :rg_move_fence[73] set +u :rg_move_fence[75] [[ -z TRUE ]] :rg_move_fence[80] [[ TRUE == TRUE ]] :rg_move_fence[82] LOCAL_NODENAME=epprda :rg_move_fence[83] odmget -qid=1 HACMPgroup :rg_move_fence[83] egrep 'group =' :rg_move_fence[83] awk '{print $3}' :rg_move_fence[83] eval RGNAME='"epprd_rg"' :rg_move_fence[1] RGNAME=epprd_rg +epprd_rg:rg_move_fence[84] GROUPNAME=epprd_rg +epprd_rg:rg_move_fence[85] group_state='$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[86] set +u +epprd_rg:rg_move_fence[87] eval print '$RESGRP_epprd_rg_epprda' +epprd_rg:rg_move_fence[1] print ONLINE +epprd_rg:rg_move_fence[87] RG_MOVE_ONLINE=ONLINE +epprd_rg:rg_move_fence[87] export RG_MOVE_ONLINE +epprd_rg:rg_move_fence[88] set -u +epprd_rg:rg_move_fence[89] RG_MOVE_ONLINE=ONLINE +epprd_rg:rg_move_fence[91] set -a +epprd_rg:rg_move_fence[92] clsetenvgrp epprda rg_move epprd_rg '' :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprda rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_fence[92] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" 
\nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[93] RC=0 +epprd_rg:rg_move_fence[94] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_fence[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_fence[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_fence[3] HOMELESS_GROUPS='' +epprd_rg:rg_move_fence[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_fence[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_fence[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_fence[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_fence[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_fence[8] SIBLING_GROUPS='' +epprd_rg:rg_move_fence[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_fence[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_fence[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_fence[95] set +a +epprd_rg:rg_move_fence[96] [ 0 -ne 0 ] +epprd_rg:rg_move_fence[103] process_resources FENCE :rg_move_fence[3318] version=1.169 :rg_move_fence[3321] STATUS=0 :rg_move_fence[3322] sddsrv_off=FALSE :rg_move_fence[3324] true :rg_move_fence[3326] : call rgpa, and it will tell us what to do next :rg_move_fence[3328] set -a :rg_move_fence[3329] clRGPA FENCE :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa FENCE 2023-09-30T03:26:07.974131 clrgpa :clRGPA[+55] exit 0 :rg_move_fence[3329] eval JOB_TYPE=NONE :rg_move_fence[1] JOB_TYPE=NONE :rg_move_fence[3330] RC=0 :rg_move_fence[3331] set +a :rg_move_fence[3333] (( 0 != 0 )) :rg_move_fence[3342] RESOURCE_GROUPS='' :rg_move_fence[3343] GROUPNAME='' :rg_move_fence[3343] export GROUPNAME :rg_move_fence[3353] IS_SERVICE_START=1 :rg_move_fence[3354] IS_SERVICE_STOP=1 :rg_move_fence[3360] [[ NONE == RELEASE ]] :rg_move_fence[3360] [[ NONE == ONLINE ]] :rg_move_fence[3729] break :rg_move_fence[3740] : If sddsrv was turned off above, turn it back on again :rg_move_fence[3742] [[ FALSE == TRUE ]] :rg_move_fence[3747] exit 0 +epprd_rg:rg_move_fence[104] : exit status of process_resources FENCE is: 0 +epprd_rg:rg_move_fence[107] [[ TRUE == TRUE ]] +epprd_rg:rg_move_fence[109] export EVENT_TYPE +epprd_rg:rg_move_fence[110] echo ACQUIRE_PRIMARY_NFS ACQUIRE_PRIMARY_NFS +epprd_rg:rg_move_fence[111] [[ -n '' ]] +epprd_rg:rg_move_fence[141] exit 0 Sep 30 2023 03:26:07 EVENT COMPLETED: rg_move_fence epprds 1 0 |2023-09-30T03:26:08|22393|EVENT COMPLETED: rg_move_fence epprds 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-30T03:26:08.078134 + echo '|2023-09-30T03:26:08.078134|INFO: rg_move_fence|epprd_rg|epprds|1|0' + 1>> /var/hacmp/availability/clavailability.log Sep 30 2023 03:26:08 EVENT START: rg_move_acquire epprds 1 |2023-09-30T03:26:08|22393|EVENT START: rg_move_acquire epprds 1| + clcycle clavailability.log + 1> 
/dev/null 2>& 1 + cltime + DATE=2023-09-30T03:26:08.288203 + echo '|2023-09-30T03:26:08.288203|INFO: rg_move_acquire|epprd_rg|epprds|1' + 1>> /var/hacmp/availability/clavailability.log :rg_move_acquire[+54] [[ high == high ]] :rg_move_acquire[+54] version=1.9.1.7 :rg_move_acquire[+57] set -u :rg_move_acquire[+59] [ 2 != 2 ] :rg_move_acquire[+65] set +u :rg_move_acquire[+67] :rg_move_acquire[+67] clodmget -n -q id=1 -f group HACMPgroup RG=epprd_rg :rg_move_acquire[+68] export RG :rg_move_acquire[+70] [[ ACQUIRE_PRIMARY_NFS == ACQUIRE_PRIMARY ]] :rg_move_acquire[+118] clcallev rg_move epprds 1 ACQUIRE Sep 30 2023 03:26:08 EVENT START: rg_move epprds 1 ACQUIRE |2023-09-30T03:26:08|22393|EVENT START: rg_move epprds 1 ACQUIRE| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-09-30T03:26:08.416087 :clevlog[amlog_trace:320] echo '|2023-09-30T03:26:08.416087|INFO: rg_move|epprd_rg|epprds|1|ACQUIRE' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. :get_local_nodename[63] grep -w epprda :get_local_nodename[63] clnodename :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move[76] version=%I% :rg_move[86] STATUS=0 :rg_move[88] [[ ! 
-n '' ]] :rg_move[90] EMULATE=REAL :rg_move[96] set -u :rg_move[98] NODENAME=epprds :rg_move[98] export NODENAME :rg_move[99] RGID=1 :rg_move[100] (( 3 == 3 )) :rg_move[102] ACTION=ACQUIRE :rg_move[108] : serial number for this event is 22393 :rg_move[112] RG_UP_POSTEVENT_ON_NODE=epprds :rg_move[112] export RG_UP_POSTEVENT_ON_NODE :rg_move[116] clodmget -qid=1 -f group -n HACMPgroup :rg_move[116] eval RGNAME=epprd_rg :rg_move[1] RGNAME=epprd_rg :rg_move[118] UPDATESTATD=0 :rg_move[119] export UPDATESTATD :rg_move[123] RG_MOVE_EVENT=true :rg_move[123] export RG_MOVE_EVENT :rg_move[128] group_state='$RESGRP_epprd_rg_epprda' :rg_move[129] set +u :rg_move[130] eval print '$RESGRP_epprd_rg_epprda' :rg_move[1] print ONLINE :rg_move[130] RG_MOVE_ONLINE=ONLINE :rg_move[130] export RG_MOVE_ONLINE :rg_move[131] set -u :rg_move[132] RG_MOVE_ONLINE=ONLINE :rg_move[139] rm -f /tmp/.NFSSTOPPED :rg_move[140] rm -f /tmp/.RPCLOCKDSTOPPED :rg_move[147] set -a :rg_move[148] clsetenvgrp epprds rg_move epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprds rg_move epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 :rg_move[148] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[149] RC=0 :rg_move[150] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' :rg_move[1] FORCEDOWN_GROUPS='' :rg_move[2] RESOURCE_GROUPS='' :rg_move[3] HOMELESS_GROUPS='' :rg_move[4] HOMELESS_FOLLOWER_GROUPS='' :rg_move[5] ERRSTATE_GROUPS='' :rg_move[6] PRINCIPAL_ACTIONS='' :rg_move[7] ASSOCIATE_ACTIONS='' :rg_move[8] AUXILLIARY_ACTIONS='' :rg_move[8] SIBLING_GROUPS='' :rg_move[9] SIBLING_NODES_BY_GROUP='' :rg_move[10] SIBLING_ACQUIRING_GROUPS='' :rg_move[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' :rg_move[12] SIBLING_RELEASING_GROUPS='' :rg_move[13] SIBLING_RELEASING_NODES_BY_GROUP='' :rg_move[151] set +a :rg_move[155] (( 0 != 0 )) :rg_move[155] [[ -z epprd_rg ]] :rg_move[164] [[ -z TRUE ]] :rg_move[241] AM_SYNC_CALLED_BY=RG_MOVE :rg_move[241] export AM_SYNC_CALLED_BY :rg_move[242] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-09-30T03:26:08.538032 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=NONE :process_resources[1] JOB_TYPE=NONE :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 
)) :process_resources[3342] RESOURCE_GROUPS='' :process_resources[3343] GROUPNAME='' :process_resources[3343] export GROUPNAME :process_resources[3353] IS_SERVICE_START=1 :process_resources[3354] IS_SERVICE_STOP=1 :process_resources[3360] [[ NONE == RELEASE ]] :process_resources[3360] [[ NONE == ONLINE ]] :process_resources[3729] break :process_resources[3740] : If sddsrv was turned off above, turn it back on again :process_resources[3742] [[ FALSE == TRUE ]] :process_resources[3747] exit 0 :rg_move[247] : unsetting AM_SYNC_CALLED_BY from $'callers environment as\n: we dont' require it after this point in execution. :rg_move[250] unset AM_SYNC_CALLED_BY :rg_move[253] [[ -f /tmp/.NFSSTOPPED ]] :rg_move[274] [[ -f /tmp/.RPCLOCKDSTOPPED ]] :rg_move[293] exit 0 Sep 30 2023 03:26:08 EVENT COMPLETED: rg_move epprds 1 ACQUIRE 0 |2023-09-30T03:26:08|22393|EVENT COMPLETED: rg_move epprds 1 ACQUIRE 0| :clevlog[amlog_trace:318] clcycle clavailability.log :clevlog[amlog_trace:318] 1> /dev/null 2>& 1 :clevlog[amlog_trace:319] cltime :clevlog[amlog_trace:319] DATE=2023-09-30T03:26:08.666664 :clevlog[amlog_trace:320] echo '|2023-09-30T03:26:08.666664|INFO: rg_move|epprd_rg|epprds|1|ACQUIRE|0' :clevlog[amlog_trace:320] 1>> /var/hacmp/availability/clavailability.log :rg_move_acquire[+119] exit_status=0 :rg_move_acquire[+120] : exit status of clcallev rg_move epprds 1 ACQUIRE is: 0 :rg_move_acquire[+121] exit 0 Sep 30 2023 03:26:08 EVENT COMPLETED: rg_move_acquire epprds 1 0 |2023-09-30T03:26:08|22393|EVENT COMPLETED: rg_move_acquire epprds 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-30T03:26:08.777345 + echo '|2023-09-30T03:26:08.777345|INFO: rg_move_acquire|epprd_rg|epprds|1|0' + 1>> /var/hacmp/availability/clavailability.log Sep 30 2023 03:26:08 EVENT START: rg_move_complete epprds 1 |2023-09-30T03:26:08|22393|EVENT START: rg_move_complete epprds 1| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-30T03:26:08.959618 + echo '|2023-09-30T03:26:08.959618|INFO: rg_move_complete|epprd_rg|epprds|1' + 1>> /var/hacmp/availability/clavailability.log :get_local_nodename[48] version=1.2.1.28 :get_local_nodename[52] : cllsclstr -N will return the local node if not configured in HACMPcluster :get_local_nodename[54] ODMDIR=/etc/es/objrepos :get_local_nodename[54] export ODMDIR :get_local_nodename[55] nodename='' :get_local_nodename[55] typeset nodename :get_local_nodename[56] cllsclstr -N :get_local_nodename[56] nodename=epprda :get_local_nodename[57] rc=0 :get_local_nodename[57] typeset -i rc :get_local_nodename[58] (( 0 != 0 )) :get_local_nodename[61] : If the node name in HACMPcluster matches a configured node, we are done. :get_local_nodename[63] grep -w epprda :get_local_nodename[63] clnodename :get_local_nodename[63] [[ -n epprda ]] :get_local_nodename[65] print -- epprda :get_local_nodename[66] exit 0 :rg_move_complete[91] version=%I% :rg_move_complete[97] STATUS=0 :rg_move_complete[97] typeset -li STATUS :rg_move_complete[99] [[ -z '' ]] :rg_move_complete[101] EMULATE=REAL :rg_move_complete[104] set -u :rg_move_complete[106] (( 2 < 2 || 2 > 3 )) :rg_move_complete[112] NODENAME=epprds :rg_move_complete[112] export NODENAME :rg_move_complete[113] RGID=1 :rg_move_complete[114] (( 2 == 3 )) :rg_move_complete[118] RGDESTINATION='' :rg_move_complete[122] : serial number for this event is 22393 :rg_move_complete[126] : Interpret resource group ID into a resource group name. 
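----------------------------------------------------------------------------
Annotation: resource group ID to name, as the next lines do
rg_move and rg_move_complete receive the group as a numeric ID (the trailing
1 in the event arguments) and translate it back to a name by querying the
HACMPgroup ODM class and evaluating the result into RGNAME:

    RGID=1                                                   # second event argument
    eval RGNAME=$(clodmget -qid=$RGID -f group -n HACMPgroup)
    print -- $RGNAME                                         # -> epprd_rg
----------------------------------------------------------------------------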
:rg_move_complete[128] clodmget -qid=1 -f group -n HACMPgroup :rg_move_complete[128] eval RGNAME=epprd_rg :rg_move_complete[1] RGNAME=epprd_rg +epprd_rg:rg_move_complete[129] GROUPNAME=epprd_rg +epprd_rg:rg_move_complete[131] UPDATESTATD=0 +epprd_rg:rg_move_complete[131] typeset -li UPDATESTATD +epprd_rg:rg_move_complete[132] NFSSTOPPED=0 +epprd_rg:rg_move_complete[132] typeset -li NFSSTOPPED +epprd_rg:rg_move_complete[133] LIMIT=60 +epprd_rg:rg_move_complete[133] WAIT=1 +epprd_rg:rg_move_complete[133] TRY=0 +epprd_rg:rg_move_complete[133] typeset -li LIMIT WAIT TRY +epprd_rg:rg_move_complete[136] : If this is a two node cluster and exported filesystems exist, then +epprd_rg:rg_move_complete[137] : when the cluster topology is stable notify rpc.statd of the changes. +epprd_rg:rg_move_complete[139] wc -l +epprd_rg:rg_move_complete[139] clnodename +epprd_rg:rg_move_complete[139] (( 2 == 2 )) +epprd_rg:rg_move_complete[141] clodmget -f group -n HACMPgroup +epprd_rg:rg_move_complete[141] RESOURCE_GROUPS=epprd_rg +epprd_rg:rg_move_complete[144] clodmget -q group='epprd_rg AND name=EXPORT_FILESYSTEM' -f value -n HACMPresource +epprd_rg:rg_move_complete[144] EXPORTLIST=$'/board_org\n/sapmnt/EPP' +epprd_rg:rg_move_complete[146] [[ -n $'/board_org\n/sapmnt/EPP' ]] +epprd_rg:rg_move_complete[146] [[ epprd_rg == epprd_rg ]] +epprd_rg:rg_move_complete[148] UPDATESTATD=1 +epprd_rg:rg_move_complete[149] [[ REAL == EMUL ]] +epprd_rg:rg_move_complete[154] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+189] TWIN_NAME=epprds :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw status :cl_update_statd(0)[+196] rpcinfo -p :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+207] : Get the current twin, if there is one :cl_update_statd(0)[+209] :cl_update_statd(0)[+209] nfso -H sm_gethost :cl_update_statd(0)[+209] 2>& 1 CURTWIN= :cl_update_statd(0)[+210] RC=0 :cl_update_statd(0)[+212] [[ -z true ]] :cl_update_statd(0)[+212] [[ -z epprds ]] :cl_update_statd(0)[+225] : Get the interface to the twin node :cl_update_statd(0)[+227] :cl_update_statd(0)[+227] get_node_ip epprds :cl_update_statd(0)[+9] (( 1 != 1 )) :cl_update_statd(0)[+15] Twin_Name=epprds :cl_update_statd(0)[+16] NewTwin= :cl_update_statd(0)[+19] : Get the Interface details for every interface on the twin node :cl_update_statd(0)[+20] : Reject interfaces on nodes that are not public boot addresses :cl_update_statd(0)[+21] : because those are the only ones we have state information for :cl_update_statd(0)[+23] :cl_update_statd(0)[+23] cllsif -J ~ -Sw -i epprda :cl_update_statd(0)[+23] LC_ALL=C LOCAL_NETWORK_INFO=epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+25] read adapt type network net_type attrib node ip_addr skip interface skip netmask skip skip prefix ip_family :cl_update_statd(0)[+25] IFS=~ :cl_update_statd(0)[+24] cllsif -J ~ -Sw -i epprds :cl_update_statd(0)[+24] LC_ALL=C 
:cl_update_statd(0)[+25] [[ public != public ]] :cl_update_statd(0)[+25] [[ boot != boot ]] :cl_update_statd(0)[+33] : Find the state of this candidate :cl_update_statd(0)[+33] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+37] :cl_update_statd(0)[+37] tr ./ xx :cl_update_statd(0)[+37] print 61.81.244.123 addr=i61x81x244x123_epprds :cl_update_statd(0)[+43] eval candidate_state=${i61x81x244x123_epprds:-down} :cl_update_statd(0)[+43] candidate_state=UP :cl_update_statd(0)[+46] : If state is UP, check to see if this node can talk to it :cl_update_statd(0)[+46] [[ UP == UP ]] :cl_update_statd(0)[+50] ping -w 5 -c 1 -q 61.81.244.123 :cl_update_statd(0)[+50] 1> /dev/null :cl_update_statd(0)[+61] echo epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+62] read lcl_adapt lcl_type lcl_network lcl_net_type lcl_attrib lcl_node lcl_ip_addr skip lcl_interface skip lcl_netmask skip skip lcl_prefix lcl_ip_family :cl_update_statd(0)[+62] IFS=~ :cl_update_statd(0)[+61] tr \n :cl_update_statd(0)[+62] [[ net_ether_01 != net_ether_01 ]] :cl_update_statd(0)[+62] [[ boot != boot ]] :cl_update_statd(0)[+62] [[ public != public ]] :cl_update_statd(0)[+62] [[ AF_INET != AF_INET ]] :cl_update_statd(0)[+62] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+71] :cl_update_statd(0)[+71] tr ./ xx :cl_update_statd(0)[+71] print 61.81.244.134 addr=i61x81x244x134_epprda :cl_update_statd(0)[+77] eval lcl_candidate_state=${i61x81x244x134_epprda:-down} :cl_update_statd(0)[+77] lcl_candidate_state=UP :cl_update_statd(0)[+77] [[ UP == UP ]] :cl_update_statd(0)[+81] : epprds is on the same network as an interface that is up :cl_update_statd(0)[+82] : on the local node, and the attributes match. :cl_update_statd(0)[+84] NewTwin=epprds :cl_update_statd(0)[+85] break :cl_update_statd(0)[+85] [[ -n epprds ]] :cl_update_statd(0)[+91] break :cl_update_statd(0)[+91] [[ -z epprds ]] :cl_update_statd(0)[+100] echo epprds :cl_update_statd(0)[+101] return 0 NEWTWIN=epprds :cl_update_statd(0)[+227] [[ -z epprds ]] :cl_update_statd(0)[+227] [[ epprds != ]] :cl_update_statd(0)[+243] : Need to register a new twin :cl_update_statd(0)[+243] [[ -n ]] :cl_update_statd(0)[+251] : Register our new twin, epprds :cl_update_statd(0)[+253] nfso -H sm_register epprds :cl_update_statd(0)[+254] RC=0 :cl_update_statd(0)[+259] : RC is actually 0 :cl_update_statd(0)[+266] return 0 +epprd_rg:rg_move_complete[155] (( 0 != 0 )) +epprd_rg:rg_move_complete[160] break +epprd_rg:rg_move_complete[166] : Set the RESOURCE_GROUPS environment variable with the names +epprd_rg:rg_move_complete[167] : of all resource groups participating in this event, and export +epprd_rg:rg_move_complete[168] : them to all successive scripts. 
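----------------------------------------------------------------------------
Annotation: the statd twin registration completed above
For this two-node cluster with exported filesystems, cl_update_statd keeps
rpc.statd's twin registration current: it asks for the current twin with
"nfso -H sm_gethost" (empty here), picks a candidate interface for epprds
from cllsif output, pings the candidate's boot address, and registers the
reachable twin with "nfso -H sm_register". Condensed (61.81.244.123 is the
epprds boot address from the trace):

    CURTWIN=$(nfso -H sm_gethost 2>&1)         # empty: no twin registered yet
    NEWTWIN=epprds
    NEWTWIN_ADDR=61.81.244.123                 # candidate boot address via cllsif
    if ping -w 5 -c 1 -q $NEWTWIN_ADDR > /dev/null ; then
        [[ "$CURTWIN" != "$NEWTWIN" ]] && nfso -H sm_register $NEWTWIN
    fi
----------------------------------------------------------------------------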
+epprd_rg:rg_move_complete[170] set -a +epprd_rg:rg_move_complete[171] clsetenvgrp epprds rg_move_complete epprd_rg :clsetenvgrp[+49] [[ high = high ]] :clsetenvgrp[+49] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clsetenvgrp.sh 1$ :clsetenvgrp[+51] usingVer=clSetenvgrp :clsetenvgrp[+56] clSetenvgrp epprds rg_move_complete epprd_rg executing clSetenvgrp clSetenvgrp completed successfully :clsetenvgrp[+57] exit 0 +epprd_rg:rg_move_complete[171] clsetenvgrp_output=FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_complete[172] RC=0 +epprd_rg:rg_move_complete[173] eval FORCEDOWN_GROUPS=$'"" \nRESOURCE_GROUPS="" \nHOMELESS_GROUPS="" \nHOMELESS_FOLLOWER_GROUPS="" \nERRSTATE_GROUPS="" \nPRINCIPAL_ACTIONS="" \nASSOCIATE_ACTIONS="" \nAUXILLIARY_ACTIONS="" SIBLING_GROUPS=""\nSIBLING_NODES_BY_GROUP=""\nSIBLING_ACQUIRING_GROUPS=""\nSIBLING_ACQUIRING_NODES_BY_GROUP=""\nSIBLING_RELEASING_GROUPS=""\nSIBLING_RELEASING_NODES_BY_GROUP=""\n ' +epprd_rg:rg_move_complete[1] FORCEDOWN_GROUPS='' +epprd_rg:rg_move_complete[2] RESOURCE_GROUPS='' +epprd_rg:rg_move_complete[3] HOMELESS_GROUPS='' +epprd_rg:rg_move_complete[4] HOMELESS_FOLLOWER_GROUPS='' +epprd_rg:rg_move_complete[5] ERRSTATE_GROUPS='' +epprd_rg:rg_move_complete[6] PRINCIPAL_ACTIONS='' +epprd_rg:rg_move_complete[7] ASSOCIATE_ACTIONS='' +epprd_rg:rg_move_complete[8] AUXILLIARY_ACTIONS='' +epprd_rg:rg_move_complete[8] SIBLING_GROUPS='' +epprd_rg:rg_move_complete[9] SIBLING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[10] SIBLING_ACQUIRING_GROUPS='' +epprd_rg:rg_move_complete[11] SIBLING_ACQUIRING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[12] SIBLING_RELEASING_GROUPS='' +epprd_rg:rg_move_complete[13] SIBLING_RELEASING_NODES_BY_GROUP='' +epprd_rg:rg_move_complete[174] set +a +epprd_rg:rg_move_complete[175] (( 0 != 0 )) +epprd_rg:rg_move_complete[182] : For each participating resource group, serially process the resources. +epprd_rg:rg_move_complete[251] (( 1 == 1 )) +epprd_rg:rg_move_complete[253] [[ REAL == EMUL ]] +epprd_rg:rg_move_complete[259] stopsrc -s rpc.lockd 0513-044 The rpc.lockd Subsystem was requested to stop. +epprd_rg:rg_move_complete[260] rcstopsrc=0 +epprd_rg:rg_move_complete[261] (( 0 != 0 )) +epprd_rg:rg_move_complete[266] (( TRY=0)) +epprd_rg:rg_move_complete[266] (( 0<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 1<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! 
-z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 2<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 3<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 EPP database started /usr/sap/EPP/SYS/exe/run/startj2eedb completed successfully +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 4<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z stopping ]] +epprd_rg:rg_move_complete[271] sleep 1 Starting Startup Agent sapstartsrv +epprd_rg:rg_move_complete[266] ((TRY++ )) +epprd_rg:rg_move_complete[266] (( 5<60)) +epprd_rg:rg_move_complete[268] lssrc -s rpc.lockd +epprd_rg:rg_move_complete[268] LC_ALL=C +epprd_rg:rg_move_complete[268] tail -1 +epprd_rg:rg_move_complete[268] read name subsystem pid state +epprd_rg:rg_move_complete[269] [[ ! -z '' ]] +epprd_rg:rg_move_complete[273] break +epprd_rg:rg_move_complete[277] [[ ! -z '' ]] +epprd_rg:rg_move_complete[300] : Sure that rpc.lockd stopped. Restart it. +epprd_rg:rg_move_complete[302] startsrc -s rpc.lockd 0513-059 The rpc.lockd Subsystem has been started. Subsystem PID is 22479318. +epprd_rg:rg_move_complete[303] rcstartsrc=0 +epprd_rg:rg_move_complete[304] (( 0 != 0 )) +epprd_rg:rg_move_complete[365] : If the resource group in this rg_move is now homeless, +epprd_rg:rg_move_complete[366] : then we need to put it into an error state. +epprd_rg:rg_move_complete[368] active_node=0 +epprd_rg:rg_move_complete[428] : If the resource group in this rg_move is now homeless_secondary, +epprd_rg:rg_move_complete[429] : then we need to put it into an errorsecondary state. +epprd_rg:rg_move_complete[437] : Set an error state for concurrent groups that have +epprd_rg:rg_move_complete[438] : been brought offline on this node by rg_move. 
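[Annotation] The loop just traced (rg_move_complete[259]-[304]) bounces rpc.lockd: request a stop, poll the SRC for up to 60 seconds until the subsystem leaves the 'stopping' state, then start it again. A minimal sketch of that stop-poll-restart idiom using the standard AIX SRC commands seen above:

#!/bin/ksh
# Sketch of the rpc.lockd bounce from rg_move_complete[259]-[304].

stopsrc -s rpc.lockd

for (( TRY=0; TRY<60; TRY++ ))
do
    # Last lssrc line holds: name, group, pid, state. In ksh the
    # final pipeline stage runs in the current shell, so 'read'
    # really sets these variables here.
    LC_ALL=C lssrc -s rpc.lockd | tail -1 | read name subsystem pid state
    [[ -z $state ]] && break             # empty state column: fully stopped
    sleep 1
done

startsrc -s rpc.lockd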
+epprd_rg:rg_move_complete[453] AM_SYNC_CALLED_BY=RG_MOVE_COMPLETE +epprd_rg:rg_move_complete[453] export AM_SYNC_CALLED_BY +epprd_rg:rg_move_complete[454] process_resources :process_resources[3318] version=1.169 :process_resources[3321] STATUS=0 :process_resources[3322] sddsrv_off=FALSE :process_resources[3324] true :process_resources[3326] : call rgpa, and it will tell us what to do next :process_resources[3328] set -a :process_resources[3329] clRGPA :clRGPA[+47] [[ high = high ]] :clRGPA[+47] version=1.3 $Source: 61haes_r711 43haes/usr/sbin/cluster/clresmgrd/utils/clRGPA.sh 1$ :clRGPA[+49] usingVer=clrgpa :clRGPA[+54] clrgpa 2023-09-30T03:26:14.178554 clrgpa :clRGPA[+55] exit 0 :process_resources[3329] eval JOB_TYPE=NONE :process_resources[1] JOB_TYPE=NONE :process_resources[3330] RC=0 :process_resources[3331] set +a :process_resources[3333] (( 0 != 0 )) :process_resources[3342] RESOURCE_GROUPS='' :process_resources[3343] GROUPNAME='' :process_resources[3343] export GROUPNAME :process_resources[3353] IS_SERVICE_START=1 :process_resources[3354] IS_SERVICE_STOP=1 :process_resources[3360] [[ NONE == RELEASE ]] :process_resources[3360] [[ NONE == ONLINE ]] :process_resources[3729] break :process_resources[3740] : If sddsrv was turned off above, turn it back on again :process_resources[3742] [[ FALSE == TRUE ]] :process_resources[3747] exit 0 +epprd_rg:rg_move_complete[455] STATUS=0 +epprd_rg:rg_move_complete[456] : The exit status of process_resources is: 0 +epprd_rg:rg_move_complete[461] unset AM_SYNC_CALLED_BY +epprd_rg:rg_move_complete[462] [[ TRUE == TRUE ]] +epprd_rg:rg_move_complete[491] [[ -z '' ]] +epprd_rg:rg_move_complete[493] RESOURCE_GROUPS=epprd_rg +epprd_rg:rg_move_complete[499] GROUPNAME=epprd_rg +epprd_rg:rg_move_complete[499] export GROUPNAME +epprd_rg:rg_move_complete[501] cl_rrmethods2call postrg_move +epprd_rg:cl_rrmethods2call[56] version=%I% +epprd_rg:cl_rrmethods2call[84] RRMETHODS='' +epprd_rg:cl_rrmethods2call[85] NEED_RR_ENV_VARS=no +epprd_rg:cl_rrmethods2call[124] NEED_RR_ENV_VARS=yes +epprd_rg:cl_rrmethods2call[129] : Set the '*_REP_RESOURCE' variables if needed. 
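[Annotation] The process_resources run traced above ([3324]-[3747]) is a dispatcher loop: it repeatedly asks the resource group policy agent (clRGPA) what to do next, evals the JOB_TYPE=... answer into the environment, and stops when the agent reports NONE. A minimal sketch of that loop, showing only the job types visible in this trace (the real script handles many more):

#!/bin/ksh
# Sketch of the process_resources dispatcher loop.

while true
do
    set -a
    rgpa_output=$(clRGPA)                # prints e.g. JOB_TYPE=NONE
    RC=$?
    eval "$rgpa_output"
    set +a
    (( RC != 0 )) && exit "$RC"

    case $JOB_TYPE in
        RELEASE|ONLINE) ;;               # real work would happen here
        NONE)           break ;;         # nothing left to do
    esac
done
exit 0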
+epprd_rg:cl_rrmethods2call[131] [[ yes == yes ]] +epprd_rg:cl_rrmethods2call[133] cllsres +epprd_rg:cl_rrmethods2call[133] 2> /dev/null +epprd_rg:cl_rrmethods2call[133] eval APPLICATIONS='"epprd_app"' EXPORT_FILESYSTEM='"/board_org' '/sapmnt/EPP"' FILESYSTEM='""' FORCED_VARYON='"false"' FSCHECK_TOOL='"fsck"' FS_BEFORE_IPADDR='"false"' MOUNT_FILESYSTEM='"/board;/board_org"' RECOVERY_METHOD='"sequential"' SERVICE_LABEL='"epprd"' SSA_DISK_FENCING='"false"' VG_AUTO_IMPORT='"false"' VOLUME_GROUP='"datavg"' USERDEFINED_RESOURCES='""' +epprd_rg:cl_rrmethods2call[1] APPLICATIONS=epprd_app +epprd_rg:cl_rrmethods2call[1] EXPORT_FILESYSTEM='/board_org /sapmnt/EPP' +epprd_rg:cl_rrmethods2call[1] FILESYSTEM='' +epprd_rg:cl_rrmethods2call[1] FORCED_VARYON=false +epprd_rg:cl_rrmethods2call[1] FSCHECK_TOOL=fsck +epprd_rg:cl_rrmethods2call[1] FS_BEFORE_IPADDR=false +epprd_rg:cl_rrmethods2call[1] MOUNT_FILESYSTEM='/board;/board_org' +epprd_rg:cl_rrmethods2call[1] RECOVERY_METHOD=sequential +epprd_rg:cl_rrmethods2call[1] SERVICE_LABEL=epprd +epprd_rg:cl_rrmethods2call[1] SSA_DISK_FENCING=false +epprd_rg:cl_rrmethods2call[1] VG_AUTO_IMPORT=false +epprd_rg:cl_rrmethods2call[1] VOLUME_GROUP=datavg +epprd_rg:cl_rrmethods2call[1] USERDEFINED_RESOURCES='' +epprd_rg:cl_rrmethods2call[137] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[142] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[147] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[152] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[157] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[162] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[167] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[172] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[182] [[ -z '' ]] +epprd_rg:cl_rrmethods2call[184] typeset sysmgdata +epprd_rg:cl_rrmethods2call[185] typeset reposmgdata +epprd_rg:cl_rrmethods2call[186] [[ -x /usr/es/sbin/cluster/xd_generic/xd_cli/clxd_list_mg_smit ]] +epprd_rg:cl_rrmethods2call[191] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[191] [[ -n '' ]] +epprd_rg:cl_rrmethods2call[197] echo '' +epprd_rg:cl_rrmethods2call[199] return 0 +epprd_rg:rg_move_complete[501] METHODS='' +epprd_rg:rg_move_complete[516] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. 
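[Annotation] cl_rrmethods2call, traced above at [131]-[199], materializes the group's resources the same way: cllsres prints NAME="value" pairs for the current resource group, which get eval'ed into shell variables, and the replicated-resource checks then build a method list from whichever *_REP_RESOURCE variables came back non-empty (here all were empty, so METHODS stayed ''). A sketch of that flow; the GMVG_REP_RESOURCE variable and method name below are illustrative assumptions, not taken from this trace:

#!/bin/ksh
# Sketch of cl_rrmethods2call's resource materialization.
# Assumes GROUPNAME is exported, as it is in the event scripts.

eval $(cllsres 2> /dev/null)             # defines APPLICATIONS, VOLUME_GROUP, ...

RRMETHODS=''
# Hypothetical example of one *_REP_RESOURCE test:
[[ -n ${GMVG_REP_RESOURCE:-} ]] && RRMETHODS="$RRMETHODS some_rr_method"

print -- $RRMETHODS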
+epprd_rg:rg_move_complete[518] exit 0 Sep 30 2023 03:26:14 EVENT COMPLETED: rg_move_complete epprds 1 0 |2023-09-30T03:26:14|22393|EVENT COMPLETED: rg_move_complete epprds 1 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-30T03:26:14.299339 + echo '|2023-09-30T03:26:14.299339|INFO: rg_move_complete|epprd_rg|epprds|1|0' + 1>> /var/hacmp/availability/clavailability.log PowerHA SystemMirror Event Summary ---------------------------------------------------------------------------- Serial number for this event: 22393 Event: TE_RG_MOVE_ACQUIRE Start time: Sat Sep 30 03:26:07 2023 End time: Sat Sep 30 03:26:14 2023 Action: Resource: Script Name: ---------------------------------------------------------------------------- No resources changed as a result of this event ---------------------------------------------------------------------------- |EVENT_SUMMARY_START|TE_RG_MOVE_ACQUIRE|2023-09-30T03:26:07|2023-09-30T03:26:14|22393| |EVENT_NO_ACTION| |EVENT_SUMMARY_END| PowerHA SystemMirror Event Preamble ---------------------------------------------------------------------------- Serial number for this event: 22393 No resource state change initiated by the cluster manager as a result of this event ---------------------------------------------------------------------------- |EVENT_PREAMBLE_START|TE_JOIN_NODE_DEP_COMPLETE|2023-09-30T03:26:16|22393| |EVENT_NO_ACTIONS_QUEUED| |EVENT_PREAMBLE_END| Sep 30 2023 03:26:16 EVENT START: node_up_complete epprds |2023-09-30T03:26:16|22393|EVENT START: node_up_complete epprds| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-30T03:26:16.533464 + echo '|2023-09-30T03:26:16.533464|INFO: node_up_complete|epprds' + 1>> /var/hacmp/availability/clavailability.log + version=%I% + set -a + cllsparam -n epprda + eval NODE_NAME=epprda VERBOSE_LOGGING=high PS4=$'\'${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO]' $'\'' DEBUG_LEVEL=Standard LC_ALL=$'\'C\'' + NODE_NAME=epprda + VERBOSE_LOGGING=high :node_up_complete[1] PS4='${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}${PS4_TIMER:+($SECONDS)}${PS4_LOOP:+:$PS4_LOOP}[${ERRNO:+${PS4_FUNC:-}+}${KSH_VERSION:+${.sh.fun:+${.sh.fun}:}}$LINENO] ' :node_up_complete[1] DEBUG_LEVEL=Standard :node_up_complete[1] LC_ALL=C :node_up_complete[80] set +a :node_up_complete[82] NODENAME=epprds :node_up_complete[83] RC=0 :node_up_complete[83] typeset -i RC :node_up_complete[84] UPDATESTATD=0 :node_up_complete[84] typeset -i UPDATESTATD :node_up_complete[86] LPM_IN_PROGRESS_DIR=/var/hacmp/.lpm_in_progress :node_up_complete[86] typeset LPM_IN_PROGRESS_DIR :node_up_complete[87] LPM_IN_PROGRESS_PREFIX=lpm :node_up_complete[87] typeset LPM_IN_PROGRESS_PREFIX :node_up_complete[88] STATE_FILE=/var/hacmp/cl_dr.state :node_up_complete[88] typeset STATE_FILE :node_up_complete[97] STATUS=0 :node_up_complete[99] set -u :node_up_complete[101] (( 1 < 1 )) :node_up_complete[107] START_MODE='' :node_up_complete[107] typeset START_MODE :node_up_complete[108] (( 1 > 1 )) :node_up_complete[114] : serial number for this event is 22393 :node_up_complete[118] RPCLOCKDSTOPPED=0 :node_up_complete[118] typeset -i RPCLOCKDSTOPPED :node_up_complete[119] [[ -f /tmp/.RPCLOCKDSTOPPED ]] :node_up_complete[127] clnodename :node_up_complete[127] wc -l :node_up_complete[127] (( 2 == 2 )) :node_up_complete[129] clodmget -f group -n HACMPgroup :node_up_complete[129] RESOURCE_GROUPS=epprd_rg 
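[Annotation] The eval of cllsparam output at the top of node_up_complete above is where every trace prefix in this log comes from: cllsparam emits a PS4 assignment, and with VERBOSE_LOGGING=high the event scripts run under 'set -x', so ksh prints the expanded PS4 before each command. A trimmed sketch of that mechanism (the real PS4 also carries PS4_TIMER, PS4_LOOP, and function-name pieces, as shown in the trace):

#!/bin/ksh
# Sketch of the verbose-trace prompt used by the event scripts.

export GROUPNAME=epprd_rg                # yields the '+epprd_rg' prefix when set
PROGNAME=rg_move_complete
PS4='${GROUPNAME:++$GROUPNAME}:${PROGNAME:-${0##*/}}[$LINENO] '

set -x                                   # lines now log as +epprd_rg:rg_move_complete[NN]
print hello
set +x

With GROUPNAME unset the leading '+' disappears and the prefix degrades to ':scriptname[NN]', which is exactly the form the node_up_complete and cl_update_statd lines take in this log.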
:node_up_complete[132] clodmget -q group='epprd_rg AND name=EXPORT_FILESYSTEM' -f value -n HACMPresource :node_up_complete[132] EXPORTLIST=$'/board_org\n/sapmnt/EPP' :node_up_complete[133] [[ -n $'/board_org\n/sapmnt/EPP' ]] :node_up_complete[135] UPDATESTATD=1 :node_up_complete[136] [[ epprds == epprda ]] :node_up_complete[146] cl_update_statd :cl_update_statd(0)[+174] version=%I% :cl_update_statd(0)[+176] typeset -i RC=0 :cl_update_statd(0)[+178] LOCAL_FOUND= :cl_update_statd(0)[+179] TWIN_NAME= :cl_update_statd(0)[+180] [[ -z epprda ]] :cl_update_statd(0)[+181] :cl_update_statd(0)[+181] cl_get_path -S OP_SEP=~ :cl_update_statd(0)[+182] set -u :cl_update_statd(0)[+187] LOCAL_FOUND=true :cl_update_statd(0)[+189] TWIN_NAME=epprds :cl_update_statd(0)[+194] : Make sure statd is running locally :cl_update_statd(0)[+196] lssrc -s statd :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+196] grep -qw inoperative :cl_update_statd(0)[+196] rpcinfo -p :cl_update_statd(0)[+196] grep -qw status :cl_update_statd(0)[+196] LC_ALL=C :cl_update_statd(0)[+207] : Get the current twin, if there is one :cl_update_statd(0)[+209] :cl_update_statd(0)[+209] nfso -H sm_gethost :cl_update_statd(0)[+209] 2>& 1 CURTWIN=epprds :cl_update_statd(0)[+210] RC=0 :cl_update_statd(0)[+212] [[ -z true ]] :cl_update_statd(0)[+212] [[ -z epprds ]] :cl_update_statd(0)[+225] : Get the interface to the twin node :cl_update_statd(0)[+227] :cl_update_statd(0)[+227] get_node_ip epprds :cl_update_statd(0)[+9] (( 1 != 1 )) :cl_update_statd(0)[+15] Twin_Name=epprds :cl_update_statd(0)[+16] NewTwin= :cl_update_statd(0)[+19] : Get the Interface details for every interface on the twin node :cl_update_statd(0)[+20] : Reject interfaces on nodes that are not public boot addresses :cl_update_statd(0)[+21] : because those are the only ones we have state information for :cl_update_statd(0)[+23] :cl_update_statd(0)[+23] cllsif -J ~ -Sw -i epprda :cl_update_statd(0)[+23] LC_ALL=C LOCAL_NETWORK_INFO=epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+24] cllsif -J ~ -Sw -i epprds :cl_update_statd(0)[+24] LC_ALL=C :cl_update_statd(0)[+25] read adapt type network net_type attrib node ip_addr skip interface skip netmask skip skip prefix ip_family :cl_update_statd(0)[+25] IFS=~ :cl_update_statd(0)[+25] [[ public != public ]] :cl_update_statd(0)[+25] [[ boot != boot ]] :cl_update_statd(0)[+33] : Find the state of this candidate :cl_update_statd(0)[+33] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+37] :cl_update_statd(0)[+37] tr ./ xx :cl_update_statd(0)[+37] print 61.81.244.123 addr=i61x81x244x123_epprds :cl_update_statd(0)[+43] eval candidate_state=${i61x81x244x123_epprds:-down} :cl_update_statd(0)[+43] candidate_state=UP :cl_update_statd(0)[+46] : If state is UP, check to see if this node can talk to it :cl_update_statd(0)[+46] [[ UP == UP ]] :cl_update_statd(0)[+50] ping -w 5 -c 1 -q 61.81.244.123 :cl_update_statd(0)[+50] 1> /dev/null :cl_update_statd(0)[+61] echo epprda~boot~net_ether_01~ether~public~epprda~61.81.244.134~~en0~~255.255.255.0~~~24~AF_INET epprd~service~net_ether_01~ether~public~epprda~61.81.244.156~~~~255.255.255.0~~ignore~24~AF_INET :cl_update_statd(0)[+62] read lcl_adapt lcl_type lcl_network lcl_net_type lcl_attrib lcl_node lcl_ip_addr skip lcl_interface skip lcl_netmask skip skip lcl_prefix lcl_ip_family :cl_update_statd(0)[+61] tr \n :cl_update_statd(0)[+62] IFS=~ 
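[Annotation] The cl_update_statd run in progress above first confirms statd is alive (lssrc/rpcinfo at [+196]) and asks nfso -H sm_gethost for the current twin, then evaluates each candidate interface with the state-lookup idiom at [+37]-[+50]: the per-address state variables (e.g. i61x81x244x123_epprds=UP, a naming inferred from the trace) are assumed to be in the environment, so the script derives the variable name from the candidate IP and reads it indirectly, defaulting to 'down', before double-checking with ping. A minimal sketch:

#!/bin/ksh
# Sketch of the interface-state lookup from cl_update_statd [+37]-[+50].

candidate_ip=61.81.244.123
candidate_node=epprds

# dots -> 'x', then prefix 'i' and append the owning node name
addr=i$(print "$candidate_ip" | tr ./ xx)_${candidate_node}

eval candidate_state=\${$addr:-down}     # indirect read with a 'down' default

if [[ $candidate_state == UP ]] && ping -w 5 -c 1 -q "$candidate_ip" > /dev/null
then
    print "$candidate_node reachable; eligible as statd twin"
fi

A node chosen this way is then registered with nfso -H sm_register, as seen at cl_update_statd [+253] earlier in this log.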
:cl_update_statd(0)[+62] [[ net_ether_01 != net_ether_01 ]] :cl_update_statd(0)[+62] [[ boot != boot ]] :cl_update_statd(0)[+62] [[ public != public ]] :cl_update_statd(0)[+62] [[ AF_INET != AF_INET ]] :cl_update_statd(0)[+62] [[ AF_INET == AF_INET ]] :cl_update_statd(0)[+71] :cl_update_statd(0)[+71] tr ./ xx :cl_update_statd(0)[+71] print 61.81.244.134 addr=i61x81x244x134_epprda :cl_update_statd(0)[+77] eval lcl_candidate_state=${i61x81x244x134_epprda:-down} :cl_update_statd(0)[+77] lcl_candidate_state=UP :cl_update_statd(0)[+77] [[ UP == UP ]] :cl_update_statd(0)[+81] : epprds is on the same network as an interface that is up :cl_update_statd(0)[+82] : on the local node, and the attributes match. :cl_update_statd(0)[+84] NewTwin=epprds :cl_update_statd(0)[+85] break :cl_update_statd(0)[+85] [[ -n epprds ]] :cl_update_statd(0)[+91] break :cl_update_statd(0)[+91] [[ -z epprds ]] :cl_update_statd(0)[+100] echo epprds :cl_update_statd(0)[+101] return 0 NEWTWIN=epprds :cl_update_statd(0)[+227] [[ -z epprds ]] :cl_update_statd(0)[+227] [[ epprds != epprds ]] :cl_update_statd(0)[+259] : RC is actually 0 :cl_update_statd(0)[+266] return 0 :node_up_complete[147] (( 0 )) :node_up_complete[151] break :node_up_complete[156] (( 1 )) :node_up_complete[158] (( 0 )) :node_up_complete[198] [[ TRUE == FALSE ]] :node_up_complete[268] refresh -s clcomd 0513-095 The request for subsystem refresh was completed successfully. :node_up_complete[270] : This is the final clRGinfo output :node_up_complete[272] clRGinfo -p -t :node_up_complete[272] 2>& 1 clRGinfo[431]: version I clRGinfo[517]: Number of resource groups = 0 clRGinfo[562]: cluster epprda_cluster is version = 22 clRGinfo[597]: no resource groups specified on command line - print all clRGinfo[685]: Current group is 'epprd_rg' get primary state info for state 6 get secondary state info for state 6 getPrimaryStateStr: using primary_table => primary_state_table get primary state info for state 4 get secondary state info for state 4 getPrimaryStateStr: using primary_table => primary_state_table
Cluster Name: epprda_cluster
Resource Group Name: epprd_rg
Node                                                             Group State     Delayed Timers
---------------------------------------------------------------- --------------- -------------------
epprda                                                           ONLINE
epprds                                                           OFFLINE
:node_up_complete[277] (( 0 == 0 )) :node_up_complete[279] [[ epprds != epprda ]] :node_up_complete[281] grep -w In_progress_file /var/hacmp/cl_dr.state :node_up_complete[281] 2> /dev/null :node_up_complete[281] cut -d= -f2 :node_up_complete[281] lpm_in_progress_file='' :node_up_complete[282] ls '/var/hacmp/.lpm_in_progress/lpm_*' :node_up_complete[282] 2> /dev/null :node_up_complete[282] lpm_in_progress_prefix='' :node_up_complete[283] [[ -n '' ]] :node_up_complete[300] exit 0 Sep 30 2023 03:26:16 EVENT COMPLETED: node_up_complete epprds 0 |2023-09-30T03:26:16|22393|EVENT COMPLETED: node_up_complete epprds 0| + clcycle clavailability.log + 1> /dev/null 2>& 1 + cltime + DATE=2023-09-30T03:26:16.788684 + echo '|2023-09-30T03:26:16.788684|INFO: node_up_complete|epprds|0' + 1>> /var/hacmp/availability/clavailability.log OK Instance Service on host epprda started ------------------------------------------- starting SAP Instance SCS01 Startup-Log is written to /home/eppadm/startsap_SCS01.log ------------------------------------------- /usr/sap/EPP/SCS01/exe/sapcontrol -prot NI_HTTP -nr 01 -function Start Instance on host epprda started Starting Startup Agent sapstartsrv OK Instance Service on host epprda started -------------------------------------------
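[Annotation] Before the SAP instance starts resume below, note the LPM (Live Partition Mobility) check traced above at node_up_complete[281]-[283]: the script looks for an In_progress_file entry in the cl_dr.state file and for lpm_* markers under the in-progress directory, and takes LPM-specific action only if either exists. In the trace the glob appears quoted ('/var/hacmp/.lpm_in_progress/lpm_*'), which would defeat expansion; the sketch below leaves it unquoted. Paths are the ones from the trace:

#!/bin/ksh
# Sketch of the LPM-in-progress check from node_up_complete.

STATE_FILE=/var/hacmp/cl_dr.state
LPM_DIR=/var/hacmp/.lpm_in_progress

lpm_file=$(grep -w In_progress_file $STATE_FILE 2> /dev/null | cut -d= -f2)
lpm_markers=$(ls $LPM_DIR/lpm_* 2> /dev/null)

if [[ -n $lpm_file || -n $lpm_markers ]]
then
    print "LPM in progress"              # placeholder for the real handling
fi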
starting SAP Instance J00 Startup-Log is written to /home/eppadm/startsap_J00.log ------------------------------------------- /usr/sap/EPP/J00/exe/sapcontrol -prot NI_HTTP -nr 00 -function Start Instance on host epprda started Starting Startup Agent sapstartsrv OK Instance Service on host epprda started ------------------------------------------- starting SAP Instance SMDA97 Startup-Log is written to /home/daaadm/startsap_SMDA97.log ------------------------------------------- /usr/sap/DAA/SMDA97/exe/sapcontrol -prot NI_HTTP -nr 97 -function Start Instance on host epprda started [1] 22675824 sapscsa(5):22675824: I am " ", but should be "sapsys ". sapscsa: system(/sapmnt/EPP/exe/uc/rs6000_64/sapmscsa pf=/usr/sap/sapwebdisp/sapwebdisp.pfl ); /sapmnt/EPP/exe/uc/rs6000_64/sapmscsa=>sapparam: SAPSYSTEMNAME neither in Profile nor in Commandline sapcscsa: SCSA defined. sapscsaId == 1049720 == 00100478 perm == 416 sapcscsa: SCSA attached at address a00000000000000 sapcscsa: SCSA initialized. /sapmnt/EPP/exe/uc/rs6000_64/log/SLOG10: No such file or directory rslgwr1(20): rstrbopen cannot open pre-existing SysLog file. /sapmnt/EPP/exe/uc/rs6000_64/log/SLOG10: No such file or directory rslgwr1(11): rstrbopen cannot open SysLog file. SysLog:iE1020230930032728002169200000sa no TTY eppadm : sapmscsa 0000 :SCSA 4096 /sapmnt/EPP/exe/uc/rs6000_64/sapmscsa: finished. /sapmnt/EPP/exe/uc/rs6000_64/log/SLOG10: No such file or directory rslgwr1(11): rstrbopen cannot open SysLog file. SysLog:lIM120230930032728002267500000IC : 0 :SAP Web Dispatcher&epprda.sebang.com&22675824& icxxrout2014 icmbnd: handle for "epprda:80" (on all adapters) successfully sent to server *** SAP Web Dispatcher up and operational (pid: 22675824, HTTP: 80, HTTPS: -) ***
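[Annotation] The tail of this log shows each SAP instance being started through its own sapcontrol binary with the instance number and the Start function. A sketch gathering the exact invocations traced above into one wrapper; the loop itself is illustrative, not part of the logged scripts:

#!/bin/ksh
# Sketch of the SAP instance starts that close this log.

for pair in SCS01:01 J00:00
do
    inst=${pair%%:*}                     # e.g. SCS01
    nr=${pair##*:}                       # e.g. 01
    /usr/sap/EPP/$inst/exe/sapcontrol -prot NI_HTTP -nr $nr -function Start
done

# The diagnostics agent (DAA) instance is started the same way:
/usr/sap/DAA/SMDA97/exe/sapcontrol -prot NI_HTTP -nr 97 -function Start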